Merge branch 'i2c/for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux

Pull more i2c updates from Wolfram Sang:
 "Included are two bugfixes needing some bigger refactoring (sh_mobile:
  deferred probe with DMA, mv64xxx: fix offload support) and one
  deprecated driver removal I thought would go in via ppc but I
  misunderstood.  It has a proper ack from BenH"

* 'i2c/for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux:
  i2c: sh_mobile: fix uninitialized var when debug is enabled
  macintosh: therm_pm72: delete deprecated driver
  i2c: sh_mobile: I2C_SH_MOBILE should depend on HAS_DMA
  i2c: sh_mobile: rework deferred probing
  i2c: sh_mobile: refactor DMA setup
  i2c: mv64xxx: rework offload support to fix several problems
  i2c: mv64xxx: use BIT() macro for register value definitions
diff --git a/.gitignore b/.gitignore
index e213b27..ce57b79 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,3 +96,6 @@
 
 # Kconfig presets
 all.config
+
+# Kdevelop4
+*.kdev4
diff --git a/.mailmap b/.mailmap
index 1ad6873..ada8ad6 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,7 +17,7 @@
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
-Andrew Morton <akpm@osdl.org>
+Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
 Archit Taneja <archit@ti.com>
@@ -102,6 +102,8 @@
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
 Sam Ravnborg <sam@mars.ravnborg.org>
+Santosh Shilimkar <ssantosh@kernel.org>
+Santosh Shilimkar <santosh.shilimkar@oracle.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Ça─člar Onur <caglar@pardus.org.tr>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
diff --git a/CREDITS b/CREDITS
index c56d8aa..96935df 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1734,14 +1734,14 @@
 S: USA
 
 N: Dave Jones
-E: davej@redhat.com
+E: davej@codemonkey.org.uk
 W: http://www.codemonkey.org.uk
 D: Assorted VIA x86 support.
 D: 2.5 AGPGART overhaul.
 D: CPUFREQ maintenance.
-D: Fedora kernel maintenance.
+D: Fedora kernel maintenance (2003-2014).
+D: 'Trinity' and similar fuzz testing work.
 D: Misc/Other.
-S: 314 Littleton Rd, Westford, MA 01886, USA
 
 N: Martin Josfsson
 E: gandalf@wlug.westbo.se
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
new file mode 100644
index 0000000..7969443
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dell-laptop
@@ -0,0 +1,60 @@
+What:		/sys/class/leds/dell::kbd_backlight/als_setting
+Date:		December 2014
+KernelVersion:	3.19
+Contact:	Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+		Pali Rohár <pali.rohar@gmail.com>
+Description:
+		This file allows to control the automatic keyboard
+		illumination mode on some systems that have an ambient
+		light sensor. Write 1 to this file to enable the auto
+		mode, 0 to disable it.
+
+What:		/sys/class/leds/dell::kbd_backlight/start_triggers
+Date:		December 2014
+KernelVersion:	3.19
+Contact:	Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+		Pali Rohár <pali.rohar@gmail.com>
+Description:
+		This file allows to control the input triggers that
+		turn on the keyboard backlight illumination that is
+		disabled because of inactivity.
+		Read the file to see the triggers available. The ones
+		enabled are preceded by '+', those disabled by '-'.
+
+		To enable a trigger, write its name preceded by '+' to
+		this file. To disable a trigger, write its name preceded
+		by '-' instead.
+
+		For example, to enable the keyboard as trigger run:
+		    echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+		To disable it:
+		    echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
+
+		Note that not all the available triggers can be configured.
+
+What:		/sys/class/leds/dell::kbd_backlight/stop_timeout
+Date:		December 2014
+KernelVersion:	3.19
+Contact:	Gabriele Mazzotta <gabriele.mzt@gmail.com>,
+		Pali Rohár <pali.rohar@gmail.com>
+Description:
+		This file allows to specify the interval after which the
+		keyboard illumination is disabled because of inactivity.
+		The timeouts are expressed in seconds, minutes, hours and
+		days, for which the symbols are 's', 'm', 'h' and 'd'
+		respectively.
+
+		To configure the timeout, write to this file a value along
+		with any of the above units. If no unit is specified, the value
+		is assumed to be expressed in seconds.
+
+		For example, to set the timeout to 10 minutes run:
+		    echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
+
+		Note that when this file is read, the returned value might be
+		expressed in a different unit than the one used when the timeout
+		was set.
+
+		Also note that only some timeouts are supported and that
+		some systems might fall back to a specific timeout in case
+		an invalid timeout is written to this file.
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 0a2debf..350dfb3 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2579,6 +2579,18 @@
       </orderedlist>
     </section>
 
+    <section>
+      <title>V4L2 in Linux 3.19</title>
+      <orderedlist>
+	<listitem>
+	  <para>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding;
+and &v4l2-quantization; fields to &v4l2-pix-format;, &v4l2-pix-format-mplane;
+and &v4l2-mbus-framefmt;.
+	  </para>
+	</listitem>
+      </orderedlist>
+    </section>
+
     <section id="other">
       <title>Relation of V4L2 to other Linux multimedia APIs</title>
 
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index ccf6053..d5eca4b 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -138,9 +138,25 @@
 	<row>
 	  <entry>__u32</entry>
 	  <entry><structfield>flags</structfield></entry>
-	    <entry>Flags set by the application or driver, see <xref
+	  <entry>Flags set by the application or driver, see <xref
 linkend="format-flags" />.</entry>
 	</row>
+	<row>
+	  <entry>&v4l2-ycbcr-encoding;</entry>
+	  <entry><structfield>ycbcr_enc</structfield></entry>
+	  <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+	</row>
+	<row>
+	  <entry>&v4l2-quantization;</entry>
+	  <entry><structfield>quantization</structfield></entry>
+	  <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+	</row>
       </tbody>
     </tgroup>
   </table>
@@ -232,9 +248,25 @@
 	  <entry>Flags set by the application or driver, see <xref
 linkend="format-flags" />.</entry>
 	</row>
+	<row>
+	  <entry>&v4l2-ycbcr-encoding;</entry>
+	  <entry><structfield>ycbcr_enc</structfield></entry>
+	  <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+	</row>
+	<row>
+	  <entry>&v4l2-quantization;</entry>
+	  <entry><structfield>quantization</structfield></entry>
+	  <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+	</row>
         <row>
           <entry>__u8</entry>
-          <entry><structfield>reserved[10]</structfield></entry>
+          <entry><structfield>reserved[8]</structfield></entry>
           <entry>Reserved for future extensions. Should be zeroed by the
            application.</entry>
         </row>
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index 18730b9..c5ea868 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -34,8 +34,24 @@
 	  <xref linkend="colorspaces" /> for details.</entry>
 	</row>
 	<row>
+	  <entry>&v4l2-ycbcr-encoding;</entry>
+	  <entry><structfield>ycbcr_enc</structfield></entry>
+	  <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+	</row>
+	<row>
+	  <entry>&v4l2-quantization;</entry>
+	  <entry><structfield>quantization</structfield></entry>
+	  <entry>This information supplements the
+<structfield>colorspace</structfield> and must be set by the driver for
+capture streams and by the application for output streams,
+see <xref linkend="colorspaces" />.</entry>
+	</row>
+	<row>
 	  <entry>__u32</entry>
-	  <entry><structfield>reserved</structfield>[7]</entry>
+	  <entry><structfield>reserved</structfield>[6]</entry>
 	  <entry>Reserved for future extensions. Applications and drivers must
 	  set the array to zero.</entry>
 	</row>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 7cfe618..ac0f8d9 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -152,6 +152,15 @@
 applications. -->
 
       <revision>
+	<revnumber>3.19</revnumber>
+	<date>2014-12-05</date>
+	<authorinitials>hv</authorinitials>
+	<revremark>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding; and &v4l2-quantization; fields
+to &v4l2-pix-format;, &v4l2-pix-format-mplane; and &v4l2-mbus-framefmt;.
+	</revremark>
+      </revision>
+
+      <revision>
 	<revnumber>3.17</revnumber>
 	<date>2014-08-04</date>
 	<authorinitials>lp, hv</authorinitials>
@@ -539,7 +548,7 @@
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.17</subtitle>
+ <subtitle>Revision 3.19</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-opal.txt b/Documentation/devicetree/bindings/i2c/i2c-opal.txt
new file mode 100644
index 0000000..12bc614
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-opal.txt
@@ -0,0 +1,37 @@
+Device-tree bindings for I2C OPAL driver
+----------------------------------------
+
+Most of the device node and properties layout is specific to the firmware and
+used by the firmware itself for configuring the port. From the linux
+perspective, the properties of use are "ibm,port-name" and "ibm,opal-id".
+
+Required properties:
+
+- reg: Port-id within a given master
+- compatible: must be "ibm,opal-i2c"
+- ibm,opal-id: Refers to a specific bus and used to identify it when calling
+	       the relevant OPAL functions.
+- bus-frequency: Operating frequency of the i2c bus (in HZ). Informational for
+		 linux, used by the FW though.
+
+Optional properties:
+- ibm,port-name: Firmware provides this name that uniquely identifies the i2c
+		 port.
+
+The node contains a number of other properties that are used by the FW itself
+and depend on the specific hardware implementation. The example below depicts
+a P8 on-chip bus.
+
+Example:
+
+i2c-bus@0 {
+	reg = <0x0>;
+	bus-frequency = <0x61a80>;
+	compatible = "ibm,power8-i2c-port", "ibm,opal-i2c";
+	ibm,opal-id = <0x1>;
+	ibm,port-name = "p8_00000000_e1p0";
+	#address-cells = <0x1>;
+	phandle = <0x10000006>;
+	#size-cells = <0x0>;
+	linux,phandle = <0x10000006>;
+};
diff --git a/Documentation/devicetree/bindings/input/cap1106.txt b/Documentation/devicetree/bindings/input/cap11xx.txt
similarity index 64%
rename from Documentation/devicetree/bindings/input/cap1106.txt
rename to Documentation/devicetree/bindings/input/cap11xx.txt
index 4b46390..7d0a300 100644
--- a/Documentation/devicetree/bindings/input/cap1106.txt
+++ b/Documentation/devicetree/bindings/input/cap11xx.txt
@@ -1,14 +1,16 @@
-Device tree bindings for Microchip CAP1106, 6 channel capacitive touch sensor
+Device tree bindings for Microchip CAP11xx based capacitive touch sensors
 
-The node for this driver must be a child of a I2C controller node, as the
+The node for this device must be a child of an I2C controller node, as the
 device communication via I2C only.
 
 Required properties:
 
-	compatible:		Must be "microchip,cap1106"
+	compatible:		Must contain one of:
+					"microchip,cap1106"
+					"microchip,cap1126"
+					"microchip,cap1188"
 
 	reg:			The I2C slave address of the device.
-				Only 0x28 is valid.
 
 	interrupts:		Property describing the interrupt line the
 				device's ALERT#/CM_IRQ# pin is connected to.
@@ -26,6 +28,10 @@
 				Valid values are 1, 2, 4, and 8.
 				By default, a gain of 1 is set.
 
+	microchip,irq-active-high:	By default the interrupt pin is active low
+				open drain. This property allows using the active
+				high push-pull output.
+
 	linux,keycodes:		Specifies an array of numeric keycode values to
 				be used for the channels. If this property is
 				omitted, KEY_A, KEY_B, etc are used as
@@ -43,11 +49,11 @@
 		autorepeat;
 		microchip,sensor-gain = <2>;
 
-		linux,keycodes = <103		/* KEY_UP */
-				  106		/* KEY_RIGHT */
-				  108		/* KEY_DOWN */
-				  105		/* KEY_LEFT */
-				  109		/* KEY_PAGEDOWN */
-				  104>;		/* KEY_PAGEUP */
+		linux,keycodes = <103>,		/* KEY_UP */
+				 <106>,		/* KEY_RIGHT */
+				 <108>,		/* KEY_DOWN */
+				 <105>,		/* KEY_LEFT */
+				 <109>,		/* KEY_PAGEDOWN */
+				 <104>;		/* KEY_PAGEUP */
 	};
 }
diff --git a/Documentation/devicetree/bindings/input/elan_i2c.txt b/Documentation/devicetree/bindings/input/elan_i2c.txt
new file mode 100644
index 0000000..ee3242c
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/elan_i2c.txt
@@ -0,0 +1,34 @@
+Elantech I2C Touchpad
+
+Required properties:
+- compatible: must be "elan,ekth3000".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+  binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+  binding[0]).
+
+Optional properties:
+- wakeup-source: touchpad can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+  pinctrl binding [1]).
+- vcc-supply: a phandle for the regulator supplying 3.3V power.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+	&i2c1 {
+		/* ... */
+
+		touchpad@15 {
+			compatible = "elan,ekth3000";
+			reg = <0x15>;
+			interrupt-parent = <&gpio4>;
+			interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+			wakeup-source;
+		};
+
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
new file mode 100644
index 0000000..a765232
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -0,0 +1,33 @@
+Elantech I2C Touchscreen
+
+Required properties:
+- compatible: must be "elan,ekth3500".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+  binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+  binding[0]).
+
+Optional properties:
+- wakeup-source: touchscreen can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+  pinctrl binding [1]).
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+	&i2c1 {
+		/* ... */
+
+		touchscreen@10 {
+			compatible = "elan,ekth3500";
+			reg = <0x10>;
+			interrupt-parent = <&gpio4>;
+			interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+			wakeup-source;
+		};
+
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 5c2c021..a4a38fc 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,10 +10,13 @@
 Each button (key) is represented as a sub-node of "gpio-keys":
 Subnode properties:
 
-	- gpios: OF device-tree gpio specification.
 	- label: Descriptive name of the key.
 	- linux,code: Keycode to emit.
 
+Required mutual exclusive subnode-properties:
+	- gpios: OF device-tree gpio specification.
+	- interrupts: the interrupt line for that input
+
 Optional subnode-properties:
 	- linux,input-type: Specify event type this button/key generates.
 	  If not specified defaults to <1> == EV_KEY.
@@ -33,4 +36,9 @@
 				linux,code = <103>;
 				gpios = <&gpio1 0 1>;
 			};
+			button@22 {
+				label = "GPIO Key DOWN";
+				linux,code = <108>;
+				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+			};
 			...
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index ba61782..9dafe6b 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -6,6 +6,8 @@
 channel which can be either RGB, YUYV or BT656.
 
  - compatible: Must be one of the following
+   - "renesas,vin-r8a7794" for the R8A7794 device
+   - "renesas,vin-r8a7793" for the R8A7793 device
    - "renesas,vin-r8a7791" for the R8A7791 device
    - "renesas,vin-r8a7790" for the R8A7790 device
    - "renesas,vin-r8a7779" for the R8A7779 device
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 6edc3b6..1fe6dde 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -5,7 +5,9 @@
 - reg : should specify localbus address and size used for the chip,
 	and hardware ECC controller if available.
 	If the hardware ECC is PMECC, it should contain address and size for
-	PMECC, PMECC Error Location controller and ROM which has lookup tables.
+	PMECC and PMECC Error Location controller.
+	The PMECC lookup table address and size in ROM is optional. If not
+	specified, driver will build it in runtime.
 - atmel,nand-addr-offset : offset for the address latch.
 - atmel,nand-cmd-offset : offset for the command latch.
 - #address-cells, #size-cells : Must be present if the device has sub-nodes
@@ -27,7 +29,7 @@
   are: 512, 1024.
 - atmel,pmecc-lookup-table-offset : includes two offsets of lookup table in ROM
   for different sector size. First one is for sector size 512, the next is for
-  sector size 1024.
+  sector size 1024. If not specified, driver will build the table in runtime.
 - nand-bus-width : 8 or 16 bus width if not present 8
 - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
 - Nand Flash Controller(NFC) is a slave driver under Atmel nand flash
diff --git a/Documentation/devicetree/bindings/mtd/diskonchip.txt b/Documentation/devicetree/bindings/mtd/diskonchip.txt
new file mode 100644
index 0000000..3e13bfd
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/diskonchip.txt
@@ -0,0 +1,15 @@
+M-Systems and Sandisk DiskOnChip devices
+
+M-System DiskOnChip G3
+======================
+The Sandisk (formerly M-Systems) docg3 is a nand device of 64MB to 256MB.
+
+Required properties:
+ - compatible: should be "m-systems,diskonchip-g3"
+ - reg: register base and size
+
+Example:
+	docg3: flash@0 {
+		compatible = "m-systems,diskonchip-g3";
+		reg = <0x0 0x2000>;
+	};
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
index 36ef07d..af8915b 100644
--- a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
@@ -11,8 +11,8 @@
   are made in native endianness.
 - #address-cells, #size-cells : Must be present if the device has sub-nodes
   representing partitions.
-- gpios : specifies the gpio pins to control the NAND device.  nwp is an
-  optional gpio and may be set to 0 if not present.
+- gpios : Specifies the GPIO pins to control the NAND device.  The order of
+  GPIO references is:  RDY, nCE, ALE, CLE, and an optional nWP.
 
 Optional properties:
 - bank-width : Width (in bytes) of the device.  If not present, the width
@@ -35,11 +35,11 @@
 	reg = <1 0x0000 0x2>;
 	#address-cells = <1>;
 	#size-cells = <1>;
-	gpios = <&banka 1 0	/* rdy */
-		 &banka 2 0 	/* nce */
-		 &banka 3 0 	/* ale */
-		 &banka 4 0 	/* cle */
-		 0		/* nwp */>;
+	gpios = <&banka 1 0>,	/* RDY */
+		<&banka 2 0>, 	/* nCE */
+		<&banka 3 0>, 	/* ALE */
+		<&banka 4 0>, 	/* CLE */
+		<0>;		/* nWP */
 
 	partition@0 {
 	...
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
new file mode 100644
index 0000000..0273adb
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -0,0 +1,45 @@
+Allwinner NAND Flash Controller (NFC)
+
+Required properties:
+- compatible : "allwinner,sun4i-a10-nand".
+- reg : shall contain registers location and length for data and reg.
+- interrupts : shall define the nand controller interrupt.
+- #address-cells: shall be set to 1. Encode the nand CS.
+- #size-cells : shall be set to 0.
+- clocks : shall reference nand controller clocks.
+- clock-names : nand controller internal clock names. Shall contain :
+    * "ahb" : AHB gating clock
+    * "mod" : nand controller clock
+
+Optional children nodes:
+Children nodes represent the available nand chips.
+
+Optional properties:
+- allwinner,rb : shall contain the native Ready/Busy ids.
+ or
+- rb-gpios : shall contain the gpios used as R/B pins.
+- nand-ecc-mode : one of the supported ECC modes ("hw", "hw_syndrome", "soft",
+  "soft_bch" or "none")
+
+see Documentation/devicetree/mtd/nand.txt for generic bindings.
+
+
+Examples:
+nfc: nand@01c03000 {
+	compatible = "allwinner,sun4i-a10-nand";
+	reg = <0x01c03000 0x1000>;
+	interrupts = <0 37 1>;
+	clocks = <&ahb_gates 13>, <&nand_clk>;
+	clock-names = "ahb", "mod";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>;
+	status = "okay";
+
+	nand@0 {
+		reg = <0>;
+		allwinner,rb = <0>;
+		nand-ecc-mode = "soft_bch";
+	};
+};
diff --git a/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
new file mode 100644
index 0000000..cfda0d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
@@ -0,0 +1,29 @@
+Device-Tree bindings for Atmel's HLCDC (High-end LCD Controller) PWM driver
+
+The Atmel HLCDC PWM is subdevice of the HLCDC MFD device.
+See ../mfd/atmel-hlcdc.txt for more details.
+
+Required properties:
+ - compatible: value should be one of the following:
+   "atmel,hlcdc-pwm"
+ - pinctrl-names: the pin control state names. Should contain "default".
+ - pinctrl-0: should contain the pinctrl states described by pinctrl
+   default.
+ - #pwm-cells: should be set to 3. This PWM chip use the default 3 cells
+   bindings defined in pwm.txt in this directory.
+
+Example:
+
+	hlcdc: hlcdc@f0030000 {
+		compatible = "atmel,sama5d3-hlcdc";
+		reg = <0xf0030000 0x2000>;
+		clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+		clock-names = "periph_clk","sys_clk", "slow_clk";
+
+		hlcdc_pwm: hlcdc-pwm {
+			compatible = "atmel,hlcdc-pwm";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_lcd_pwm>;
+			#pwm-cells = <3>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
new file mode 100644
index 0000000..fb6fb31
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
@@ -0,0 +1,30 @@
+BCM2835 PWM controller (Raspberry Pi controller)
+
+Required properties:
+- compatible: should be "brcm,bcm2835-pwm"
+- reg: physical base address and length of the controller's registers
+- clocks: This clock defines the base clock frequency of the PWM hardware
+  system, the period and the duty_cycle of the PWM signal is a multiple of
+  the base period.
+- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+  the cells format.
+
+Examples:
+
+pwm@2020c000 {
+	compatible = "brcm,bcm2835-pwm";
+	reg = <0x2020c000 0x28>;
+	clocks = <&clk_pwm>;
+	#pwm-cells = <2>;
+};
+
+clocks {
+	....
+		clk_pwm: pwm {
+			compatible = "fixed-clock";
+			reg = <3>;
+			#clock-cells = <0>;
+			clock-frequency = <9200000>;
+		};
+	....
+};
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index 4cf0249..4698e0e 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -5,17 +5,9 @@
 - compatible:	Should be set to one of the following:
 		marvell,armada370-thermal
 		marvell,armada375-thermal
-		marvell,armada375-z1-thermal
 		marvell,armada380-thermal
 		marvell,armadaxp-thermal
 
-		Note: As the name suggests, "marvell,armada375-z1-thermal"
-		applies for the SoC Z1 stepping only. On such stepping
-		some quirks need to be done and the register offset differs
-		from the one in the A0 stepping.
-		The operating system may auto-detect the SoC stepping and
-		update the compatible and register offsets at runtime.
-
 - reg:		Device's register space.
 		Two entries are expected, see the examples below.
 		The first one is required for the sensor register;
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
new file mode 100644
index 0000000..ef802de
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -0,0 +1,68 @@
+* Temperature Sensor ADC (TSADC) on rockchip SoCs
+
+Required properties:
+- compatible : "rockchip,rk3288-tsadc"
+- reg : physical base address of the controller and length of memory mapped
+	region.
+- interrupts : The interrupt number to the cpu. The interrupt specifier format
+	       depends on the interrupt controller.
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : Shall be "tsadc" for the converter-clock, and "apb_pclk" for
+		the peripheral clock.
+- resets : Must contain an entry for each entry in reset-names.
+	   See ../reset/reset.txt for details.
+- reset-names : Must include the name "tsadc-apb".
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
+- rockchip,hw-tshut-temp : The hardware-controlled shutdown temperature value.
+- rockchip,hw-tshut-mode : The hardware-controlled shutdown mode 0:CRU 1:GPIO.
+- rockchip,hw-tshut-polarity : The hardware-controlled active polarity 0:LOW
+			       1:HIGH.
+
+Example:
+tsadc: tsadc@ff280000 {
+	compatible = "rockchip,rk3288-tsadc";
+	reg = <0xff280000 0x100>;
+	interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+	clock-names = "tsadc", "apb_pclk";
+	resets = <&cru SRST_TSADC>;
+	reset-names = "tsadc-apb";
+	pinctrl-names = "default";
+	pinctrl-0 = <&otp_out>;
+	#thermal-sensor-cells = <1>;
+	rockchip,hw-tshut-temp = <95000>;
+	rockchip,hw-tshut-mode = <0>;
+	rockchip,hw-tshut-polarity = <0>;
+};
+
+Example: referring to thermal sensors:
+thermal-zones {
+	cpu_thermal: cpu_thermal {
+		polling-delay-passive = <1000>; /* milliseconds */
+		polling-delay = <5000>; /* milliseconds */
+
+		/* sensor	ID */
+		thermal-sensors = <&tsadc	1>;
+
+		trips {
+			cpu_alert0: cpu_alert {
+				temperature = <70000>; /* millicelsius */
+				hysteresis = <2000>; /* millicelsius */
+				type = "passive";
+			};
+			cpu_crit: cpu_crit {
+				temperature = <90000>; /* millicelsius */
+				hysteresis = <2000>; /* millicelsius */
+				type = "critical";
+			};
+		};
+
+		cooling-maps {
+			map0 {
+				trip = <&cpu_alert0>;
+				cooling-device =
+				    <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
new file mode 100644
index 0000000..ecf3ed7
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tegra-soctherm.txt
@@ -0,0 +1,53 @@
+Tegra124 SOCTHERM thermal management system
+
+The SOCTHERM IP block contains thermal sensors, support for polled
+or interrupt-based thermal monitoring, CPU and GPU throttling based
+on temperature trip points, and handling external overcurrent
+notifications. It is also used to manage emergency shutdown in an
+overheating situation.
+
+Required properties :
+- compatible : "nvidia,tegra124-soctherm".
+- reg : Should contain 1 entry:
+  - SOCTHERM register set
+- interrupts : Defines the interrupt used by SOCTHERM
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+  - tsensor
+  - soctherm
+- resets : Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names : Must include the following entries:
+  - soctherm
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description
+    of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
+    list of valid values when referring to thermal sensors.
+
+
+Example :
+
+	soctherm@0,700e2000 {
+		compatible = "nvidia,tegra124-soctherm";
+		reg = <0x0 0x700e2000 0x0 0x1000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
+			<&tegra_car TEGRA124_CLK_SOC_THERM>;
+		clock-names = "tsensor", "soctherm";
+		resets = <&tegra_car 78>;
+		reset-names = "soctherm";
+
+		#thermal-sensor-cells = <1>;
+	};
+
+Example: referring to thermal sensors :
+
+       thermal-zones {
+                cpu {
+                        polling-delay-passive = <1000>;
+                        polling-delay = <1000>;
+
+                        thermal-sensors =
+                                <&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+                };
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 423d474..b1df0ad 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -47,6 +47,7 @@
 dmo	Data Modul AG
 ebv	EBV Elektronik
 edt	Emerging Display Technologies
+elan	Elan Microelectronic Corp.
 emmicro	EM Microelectronic
 energymicro	Silicon Laboratories (formerly Energy Micro AS)
 epcos	EPCOS AG
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
deleted file mode 100644
index ffb5c80..0000000
--- a/Documentation/ia64/kvm.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Currently, kvm module is in EXPERIMENTAL stage on IA64. This means that
-interfaces are not stable enough to use. So, please don't run critical
-applications in virtual machine.
-We will try our best to improve it in future versions!
-
-				Guide: How to boot up guests on kvm/ia64
-
-This guide is to describe how to enable kvm support for IA-64 systems.
-
-1. Get the kvm source from git.kernel.org.
-	Userspace source:
-		git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
-	Kernel Source:
-		git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
-
-2. Compile the source code.
-	2.1 Compile userspace code:
-		(1)cd ./kvm-userspace
-		(2)./configure
-		(3)cd kernel
-		(4)make sync LINUX= $kernel_dir (kernel_dir is the directory of kernel source.)
-		(5)cd ..
-		(6)make qemu
-		(7)cd qemu; make install
-
-	2.2 Compile kernel source code:
-		(1) cd ./$kernel_dir
-		(2) Make menuconfig
-		(3) Enter into virtualization option, and choose kvm.
-		(4) make
-		(5) Once (4) done, make modules_install
-		(6) Make initrd, and use new kernel to reboot up host machine.
-		(7) Once (6) done, cd $kernel_dir/arch/ia64/kvm
-		(8) insmod kvm.ko; insmod kvm-intel.ko
-
-Note: For step 2, please make sure that host page size == TARGET_PAGE_SIZE of qemu, otherwise, may fail.
-
-3. Get Guest Firmware named as Flash.fd, and put it under right place:
-	(1) If you have the guest firmware (binary) released by Intel Corp for Xen, use it directly.
-
-	(2) If you have no firmware at hand, Please download its source from
-		hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
-	    you can get the firmware's binary in the directory of efi-vfirmware.hg/binaries.
-
-	(3) Rename the firmware you owned to Flash.fd, and copy it to /usr/local/share/qemu
-
-4. Boot up Linux or Windows guests:
-	4.1 Create or install a image for guest boot. If you have xen experience, it should be easy.
-
-	4.2 Boot up guests use the following command.
-		/usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
-		(xx is the number of virtual processors for the guest, now the maximum value is 4)
-
-5. Known possible issue on some platforms with old Firmware.
-
-In the event of strange host crash issues, try to solve it through either of the following ways:
-
-(1): Upgrade your Firmware to the latest one.
-
-(2): Applying the below patch to kernel source.
-diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
-index 0b53344..f02b0f7 100644
---- a/arch/ia64/kernel/pal.S
-+++ b/arch/ia64/kernel/pal.S
-@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
-	mov ar.pfs = loc1
-	mov rp = loc0
-	;;
--	srlz.d				// serialize restoration of psr.l
-+	srlz.i			// serialize restoration of psr.l
-+	;;
-	br.ret.sptk.many b0
- END(ia64_pal_call_static)
-
-6. Bug report:
-	If you found any issues when use kvm/ia64, Please post the bug info to kvm-ia64-devel mailing list.
-	https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
-
-Thanks for your interest! Let's work together, and make kvm/ia64 stronger and stronger!
-
-
-								Xiantao Zhang <xiantao.zhang@intel.com>
-											2008.3.10
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bda85f1..4df73da 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1457,6 +1457,15 @@
 		       disable
 		         Do not enable intel_pstate as the default
 		         scaling driver for the supported processors
+		       force
+			 Enable intel_pstate on systems that prohibit it by default
+			 in favor of acpi-cpufreq. Forcing the intel_pstate driver
+			 instead of acpi-cpufreq may disable platform features, such
+			 as thermal controls and power capping, that rely on ACPI
+			 P-States information being indicated to OSPM and therefore
+			 should be used with caution. This option does not work with
+			 processors that aren't supported by the intel_pstate driver
+			 or on platforms that use pcc-cpufreq instead of acpi-cpufreq.
 		       no_hwp
 		         Do not enable hardware P state control (HWP)
 			 if available.
diff --git a/Documentation/networking/fib_trie.txt b/Documentation/networking/fib_trie.txt
index 0723db7..fe71938 100644
--- a/Documentation/networking/fib_trie.txt
+++ b/Documentation/networking/fib_trie.txt
@@ -73,8 +73,8 @@
 
 trie_rebalance()
 	The key function for the dynamic trie after any change in the trie
-	it is run to optimize and reorganize. Tt will walk the trie upwards 
-	towards the root from a given tnode, doing a resize() at each step 
+	it is run to optimize and reorganize. It will walk the trie upwards
+	towards the root from a given tnode, doing a resize() at each step
 	to implement level compression.
 
 resize()
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
index e5a940e..6cfc854 100644
--- a/Documentation/video4linux/vivid.txt
+++ b/Documentation/video4linux/vivid.txt
@@ -640,6 +640,21 @@
 	Changing the colorspace will result in the V4L2_EVENT_SOURCE_CHANGE
 	to be sent since it emulates a detected colorspace change.
 
+Y'CbCr Encoding: selects which Y'CbCr encoding should be used when generating
+	a Y'CbCr image.	This only applies if the CSC Colorbar test pattern is
+	selected, and if the format is set to a Y'CbCr format as opposed to an
+	RGB format.
+
+	Changing the Y'CbCr encoding will result in the V4L2_EVENT_SOURCE_CHANGE
+	to be sent since it emulates a detected colorspace change.
+
+Quantization: selects which quantization should be used for the RGB or Y'CbCr
+	encoding when generating the test pattern. This only applies if the CSC
+	Colorbar test pattern is selected.
+
+	Changing the quantization will result in the V4L2_EVENT_SOURCE_CHANGE
+	to be sent since it emulates a detected colorspace change.
+
 Limited RGB Range (16-235): selects if the RGB range of the HDMI source should
 	be limited or full range. This combines with the Digital Video 'Rx RGB
 	Quantization Range' control and can be used to test what happens if
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7610eaa..0007fef 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -68,9 +68,12 @@
 
   Capability: which KVM extension provides this ioctl.  Can be 'basic',
       which means that is will be provided by any kernel that supports
-      API version 12 (see section 4.1), or a KVM_CAP_xyz constant, which
+      API version 12 (see section 4.1), a KVM_CAP_xyz constant, which
       means availability needs to be checked with KVM_CHECK_EXTENSION
-      (see section 4.4).
+      (see section 4.4), or 'none' which means that while not all kernels
+      support this ioctl, there's no capability bit to check its
+      availability: for kernels that don't support the ioctl,
+      the ioctl returns -ENOTTY.
 
   Architectures: which instruction set architectures provide this ioctl.
       x86 includes both i386 and x86_64.
@@ -604,7 +607,7 @@
 4.24 KVM_CREATE_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390)
-Architectures: x86, ia64, ARM, arm64, s390
+Architectures: x86, ARM, arm64, s390
 Type: vm ioctl
 Parameters: none
 Returns: 0 on success, -1 on error
@@ -612,7 +615,7 @@
 Creates an interrupt controller model in the kernel.  On x86, creates a virtual
 ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
 local APIC.  IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC.  On ia64, a IOSAPIC is created. On ARM/arm64, a GIC is
+only go to the IOAPIC.  On ARM/arm64, a GIC is
 created. On s390, a dummy irq routing table is created.
 
 Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled
@@ -622,7 +625,7 @@
 4.25 KVM_IRQ_LINE
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64, arm, arm64
+Architectures: x86, arm, arm64
 Type: vm ioctl
 Parameters: struct kvm_irq_level
 Returns: 0 on success, -1 on error
@@ -676,7 +679,7 @@
 4.26 KVM_GET_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_irqchip (in/out)
 Returns: 0 on success, -1 on error
@@ -698,7 +701,7 @@
 4.27 KVM_SET_IRQCHIP
 
 Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_irqchip (in)
 Returns: 0 on success, -1 on error
@@ -991,7 +994,7 @@
 4.38 KVM_GET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64, s390
+Architectures: x86, s390
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (out)
 Returns: 0 on success; -1 on error
@@ -1005,16 +1008,15 @@
 
 Possible values are:
 
- - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86, ia64]
+ - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86]
  - KVM_MP_STATE_UNINITIALIZED:   the vcpu is an application processor (AP)
-                                 which has not yet received an INIT signal [x86,
-                                 ia64]
+                                 which has not yet received an INIT signal [x86]
  - KVM_MP_STATE_INIT_RECEIVED:   the vcpu has received an INIT signal, and is
-                                 now ready for a SIPI [x86, ia64]
+                                 now ready for a SIPI [x86]
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
-                                 is waiting for an interrupt [x86, ia64]
+                                 is waiting for an interrupt [x86]
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
+                                 accessible via KVM_GET_VCPU_EVENTS) [x86]
  - KVM_MP_STATE_STOPPED:         the vcpu is stopped [s390]
  - KVM_MP_STATE_CHECK_STOP:      the vcpu is in a special error state [s390]
  - KVM_MP_STATE_OPERATING:       the vcpu is operating (running or halted)
@@ -1022,7 +1024,7 @@
  - KVM_MP_STATE_LOAD:            the vcpu is in a special load/startup state
                                  [s390]
 
-On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
 in-kernel irqchip, the multiprocessing state must be maintained by userspace on
 these architectures.
 
@@ -1030,7 +1032,7 @@
 4.39 KVM_SET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64, s390
+Architectures: x86, s390
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (in)
 Returns: 0 on success; -1 on error
@@ -1038,7 +1040,7 @@
 Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
 arguments.
 
-On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
 in-kernel irqchip, the multiprocessing state must be maintained by userspace on
 these architectures.
 
@@ -1065,7 +1067,7 @@
 4.41 KVM_SET_BOOT_CPU_ID
 
 Capability: KVM_CAP_SET_BOOT_CPU_ID
-Architectures: x86, ia64
+Architectures: x86
 Type: vm ioctl
 Parameters: unsigned long vcpu_id
 Returns: 0 on success, -1 on error
@@ -1257,8 +1259,8 @@
 
 4.48 KVM_ASSIGN_PCI_DEVICE
 
-Capability: KVM_CAP_DEVICE_ASSIGNMENT
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_assigned_pci_dev (in)
 Returns: 0 on success, -1 on error
@@ -1298,25 +1300,36 @@
 device assignment.  The user requesting this ioctl must have read/write
 access to the PCI sysfs resource files associated with the device.
 
+Errors:
+  ENOTTY: kernel does not support this ioctl
+
+  Other error conditions may be defined by individual device types or
+  have their standard meanings.
+
 
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
-Capability: KVM_CAP_DEVICE_DEASSIGNMENT
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_assigned_pci_dev (in)
 Returns: 0 on success, -1 on error
 
 Ends PCI device assignment, releasing all associated resources.
 
-See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+See KVM_ASSIGN_PCI_DEVICE for the data structure. Only assigned_dev_id is
 used in kvm_assigned_pci_dev to identify the device.
 
+Errors:
+  ENOTTY: kernel does not support this ioctl
+
+  Other error conditions may be defined by individual device types or
+  have their standard meanings.
 
 4.50 KVM_ASSIGN_DEV_IRQ
 
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86 ia64
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_assigned_irq (in)
 Returns: 0 on success, -1 on error
@@ -1346,11 +1359,17 @@
 It is not valid to specify multiple types per host or guest IRQ. However, the
 IRQ type of host and guest can differ or can even be null.
 
+Errors:
+  ENOTTY: kernel does not support this ioctl
+
+  Other error conditions may be defined by individual device types or
+  have their standard meanings.
+
 
 4.51 KVM_DEASSIGN_DEV_IRQ
 
 Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86 ia64
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_assigned_irq (in)
 Returns: 0 on success, -1 on error
@@ -1365,7 +1384,7 @@
 4.52 KVM_SET_GSI_ROUTING
 
 Capability: KVM_CAP_IRQ_ROUTING
-Architectures: x86 ia64 s390
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irq_routing (in)
 Returns: 0 on success, -1 on error
@@ -1423,8 +1442,8 @@
 
 4.53 KVM_ASSIGN_SET_MSIX_NR
 
-Capability: KVM_CAP_DEVICE_MSIX
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_assigned_msix_nr (in)
 Returns: 0 on success, -1 on error
@@ -1445,8 +1464,8 @@
 
 4.54 KVM_ASSIGN_SET_MSIX_ENTRY
 
-Capability: KVM_CAP_DEVICE_MSIX
-Architectures: x86 ia64
+Capability: none
+Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_assigned_msix_entry (in)
 Returns: 0 on success, -1 on error
@@ -1461,6 +1480,12 @@
 	__u16 padding[3];
 };
 
+Errors:
+  ENOTTY: kernel does not support this ioctl
+
+  Other error conditions may be defined by individual device types or
+  have their standard meanings.
+
 
 4.55 KVM_SET_TSC_KHZ
 
@@ -2453,9 +2478,15 @@
 Note that because some registers reflect machine topology, all vcpus
 should be created before this ioctl is invoked.
 
+Userspace can call this function multiple times for a given vcpu, including
+after the vcpu has been run. This will reset the vcpu to its initial
+state. All calls to this function after the initial call must use the same
+target and same set of feature flags, otherwise EINVAL will be returned.
+
 Possible features:
 	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
-	  Depends on KVM_CAP_ARM_PSCI.
+	  Depends on KVM_CAP_ARM_PSCI.  If not set, the CPU will be powered on
+	  and execute guest code when KVM_RUN is called.
 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
 	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
@@ -2951,6 +2982,15 @@
 the system-level event type. The 'flags' field describes architecture
 specific flags for the system-level event.
 
+Valid values for 'type' are:
+  KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
+   VM. Userspace is not obliged to honour this, and if it does honour
+   this does not need to destroy the VM synchronously (ie it may call
+   KVM_RUN again before shutdown finally occurs).
+  KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
+   As with SHUTDOWN, userspace can choose to ignore the request, or
+   to schedule the reset to occur in the future and may call KVM_RUN again.
+
 		/* Fix the size of the union. */
 		char padding[256];
 	};
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 0d16f96..d426fc8 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -12,14 +12,14 @@
 1. GROUP: KVM_S390_VM_MEM_CTRL
 Architectures: s390
 
-1.1. ATTRIBUTE: KVM_S390_VM_MEM_CTRL
+1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA
 Parameters: none
-Returns: -EBUSY if already a vcpus is defined, otherwise 0
+Returns: -EBUSY if a vcpu is already defined, otherwise 0
 
-Enables CMMA for the virtual machine
+Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
 
-1.2. ATTRIBUTE: KVM_S390_VM_CLR_CMMA
-Parameteres: none
+1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
+Parameters: none
 Returns: 0
 
 Clear the CMMA status for all guest pages, so any pages the guest marked
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 6d470ae..2a71c8f 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -168,7 +168,7 @@
 	64 byte memory area which must be in guest RAM and must be
 	zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
 	when asynchronous page faults are enabled on the vcpu 0 when
-	disabled. Bit 2 is 1 if asynchronous page faults can be injected
+	disabled. Bit 1 is 1 if asynchronous page faults can be injected
 	when vcpu is in cpl == 0.
 
 	First 4 byte of 64 byte memory location will be written to by
diff --git a/Documentation/x86/intel_mpx.txt b/Documentation/x86/intel_mpx.txt
index 4472ed2..818518a 100644
--- a/Documentation/x86/intel_mpx.txt
+++ b/Documentation/x86/intel_mpx.txt
@@ -7,11 +7,15 @@
 references, for those references whose compile-time normal intentions are
 usurped at runtime due to buffer overflow or underflow.
 
+You can tell if your CPU supports MPX by looking in /proc/cpuinfo:
+
+	cat /proc/cpuinfo  | grep ' mpx '
+
 For more information, please refer to Intel(R) Architecture Instruction
 Set Extensions Programming Reference, Chapter 9: Intel(R) Memory Protection
 Extensions.
 
-Note: Currently no hardware with MPX ISA is available but it is always
+Note: As of December 2014, no hardware with MPX is available but it is
 possible to use SDE (Intel(R) Software Development Emulator) instead, which
 can be downloaded from
 http://software.intel.com/en-us/articles/intel-software-development-emulator
@@ -30,9 +34,15 @@
    instrumentation as well as some setup code called early after the app
    starts. New instruction prefixes are noops for old CPUs.
 2) That setup code allocates (virtual) space for the "bounds directory",
-   points the "bndcfgu" register to the directory and notifies the kernel
-   (via the new prctl(PR_MPX_ENABLE_MANAGEMENT)) that the app will be using
-   MPX.
+   points the "bndcfgu" register to the directory (must also set the valid
+   bit) and notifies the kernel (via the new prctl(PR_MPX_ENABLE_MANAGEMENT))
+   that the app will be using MPX.  The app must be careful not to access
+   the bounds tables between the time when it populates "bndcfgu" and
+   when it calls the prctl().  This might be hard to guarantee if the app
+   is compiled with MPX.  You can add "__attribute__((bnd_legacy))" to
+   the function to disable MPX instrumentation to help guarantee this.
+   Also be careful not to call out to any other code which might be
+   MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
    succeed, and notes the location of the bounds directory. Userspace is
   expected to keep the bounds directory at that location. We note it
diff --git a/MAINTAINERS b/MAINTAINERS
index 4aac6c8..08f671d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4255,6 +4255,12 @@
 S:	Maintained
 F:	drivers/media/usb/go7007/
 
+GOODIX TOUCHSCREEN
+M:	Bastien Nocera <hadess@hadess.net>
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/input/touchscreen/goodix.c
+
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
 M:	Alexandre Courbot <gnurou@gmail.com>
@@ -4957,6 +4963,12 @@
 S:	Supported
 F:	drivers/idle/intel_idle.c
 
+INTEL PSTATE DRIVER
+M:	Kristen Carlson Accardi <kristen@linux.intel.com>
+L:	linux-pm@vger.kernel.org
+S:	Supported
+F:	drivers/cpufreq/intel_pstate.c
+
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
 M:	Maik Broemme <mbroemme@plusserver.de>
 L:	linux-fbdev@vger.kernel.org
@@ -5489,15 +5501,6 @@
 F:	arch/powerpc/include/asm/kvm*
 F:	arch/powerpc/kvm/
 
-KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
-M:	Xiantao Zhang <xiantao.zhang@intel.com>
-L:	kvm-ia64@vger.kernel.org
-W:	http://kvm.qumranet.com
-S:	Supported
-F:	Documentation/ia64/kvm.txt
-F:	arch/ia64/include/asm/kvm*
-F:	arch/ia64/kvm/
-
 KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
 M:	Christian Borntraeger <borntraeger@de.ibm.com>
 M:	Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -6613,19 +6616,8 @@
 S:	Maintained
 
 NETWORKING [WIRELESS]
-M:	"John W. Linville" <linville@tuxdriver.com>
 L:	linux-wireless@vger.kernel.org
 Q:	http://patchwork.kernel.org/project/linux-wireless/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
-S:	Maintained
-F:	net/mac80211/
-F:	net/rfkill/
-F:	net/wireless/
-F:	include/net/ieee80211*
-F:	include/linux/wireless.h
-F:	include/uapi/linux/wireless.h
-F:	include/net/iw_handler.h
-F:	drivers/net/wireless/
 
 NETWORKING DRIVERS
 L:	netdev@vger.kernel.org
@@ -6646,6 +6638,14 @@
 F:	include/uapi/linux/if_*
 F:	include/uapi/linux/netdevice.h
 
+NETWORKING DRIVERS (WIRELESS)
+M:	Kalle Valo <kvalo@codeaurora.org>
+L:	linux-wireless@vger.kernel.org
+Q:	http://patchwork.kernel.org/project/linux-wireless/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git/
+S:	Maintained
+F:	drivers/net/wireless/
+
 NETXEN (1/10) GbE SUPPORT
 M:	Manish Chopra <manish.chopra@qlogic.com>
 M:	Sony Chacko <sony.chacko@qlogic.com>
@@ -9510,6 +9510,7 @@
 S:	Supported
 F:	drivers/thermal/
 F:	include/linux/thermal.h
+F:	include/uapi/linux/thermal.h
 F:	include/linux/cpu_cooling.h
 F:	Documentation/devicetree/bindings/thermal/
 
@@ -10242,13 +10243,13 @@
 S:	Maintained
 F:	drivers/net/ethernet/via/via-velocity.*
 
-VIVI VIRTUAL VIDEO DRIVER
+VIVID VIRTUAL VIDEO DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Maintained
-F:	drivers/media/platform/vivi*
+F:	drivers/media/platform/vivid/*
 
 VLAN (802.1Q)
 M:	Patrick McHardy <kaber@trash.net>
diff --git a/Makefile b/Makefile
index fd80c6e..fa9604d 100644
--- a/Makefile
+++ b/Makefile
@@ -481,9 +481,10 @@
 # of make so .config is not included in this case either (for *config).
 
 version_h := include/generated/uapi/linux/version.h
+old_version_h := include/linux/version.h
 
 no-dot-config-targets := clean mrproper distclean \
-			 cscope gtags TAGS tags help %docs check% coccicheck \
+			 cscope gtags TAGS tags help% %docs check% coccicheck \
 			 $(version_h) headers_% archheaders archscripts \
 			 kernelversion %src-pkg
 
@@ -1005,6 +1006,7 @@
 
 $(version_h): $(srctree)/Makefile FORCE
 	$(call filechk,version.h)
+	$(Q)rm -f $(old_version_h)
 
 include/generated/utsrelease.h: include/config/kernel.release FORCE
 	$(call filechk,utsrelease.h)
@@ -1036,8 +1038,6 @@
 #Default location for installed headers
 export INSTALL_HDR_PATH = $(objtree)/usr
 
-hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
-
 # If we do an all arch process set dst to asm-$(hdr-arch)
 hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
 
@@ -1175,7 +1175,7 @@
 		  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 		  signing_key.priv signing_key.x509 x509.genkey		\
 		  extra_certificates signing_key.x509.keyid		\
-		  signing_key.x509.signer include/linux/version.h
+		  signing_key.x509.signer
 
 # clean - Delete most, but leave enough to build external modules
 #
@@ -1235,7 +1235,7 @@
 # ---------------------------------------------------------------------------
 
 boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig)
-boards := $(notdir $(boards))
+boards := $(sort $(notdir $(boards)))
 board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
 board-dirs := $(sort $(notdir $(board-dirs:/=)))
 
@@ -1326,7 +1326,7 @@
 
 help-boards: $(help-board-dirs)
 
-boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig))
+boards-per-dir = $(sort $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)))
 
 $(help-board-dirs): help-%:
 	@echo  'Architecture specific targets ($(SRCARCH) $*):'
@@ -1581,11 +1581,6 @@
   include $(cmd_files)
 endif
 
-# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir
-# Usage:
-# $(Q)$(MAKE) $(clean)=dir
-clean := -f $(srctree)/scripts/Makefile.clean obj
-
 endif	# skip-makefile
 
 PHONY += FORCE
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fe44b24..df94ac1 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -428,3 +428,4 @@
 source "security/Kconfig"
 source "crypto/Kconfig"
 source "lib/Kconfig"
+source "kernel/power/Kconfig"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 10bc3d4..db72fec 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -12,7 +12,7 @@
 CROSS_COMPILE := arc-linux-uclibc-
 endif
 
-KBUILD_DEFCONFIG := fpga_defconfig
+KBUILD_DEFCONFIG := nsim_700_defconfig
 
 cflags-y	+= -mA7 -fno-common -pipe -fno-builtin -D__linux__
 
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index cfaedd9..1c169dc 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,7 +20,7 @@
 		/* this is for console on PGU */
 		/* bootargs = "console=tty0 consoleblank=0"; */
 		/* this is for console on serial */
-		bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+		bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
 	};
 
 	aliases {
@@ -41,9 +41,9 @@
 			#interrupt-cells = <1>;
 		};
 
-		uart0: serial@c0000000 {
+		uart0: serial@f0000000 {
 			compatible = "ns8250";
-			reg = <0xc0000000 0x2000>;
+			reg = <0xf0000000 0x2000>;
 			interrupts = <11>;
 			clock-frequency = <3686400>;
 			baud = <115200>;
@@ -52,21 +52,21 @@
 			no-loopback-test = <1>;
 		};
 
-		pgu0: pgu@c9000000 {
+		pgu0: pgu@f9000000 {
 			compatible = "snps,arcpgufb";
-			reg = <0xc9000000 0x400>;
+			reg = <0xf9000000 0x400>;
 		};
 
-		ps2: ps2@c9001000 {
+		ps2: ps2@f9001000 {
 			compatible = "snps,arc_ps2";
-			reg = <0xc9000400 0x14>;
+			reg = <0xf9000400 0x14>;
 			interrupts = <13>;
 			interrupt-names = "arc_ps2_irq";
 		};
 
-		eth0: ethernet@c0003000 {
+		eth0: ethernet@f0003000 {
 			compatible = "snps,oscilan";
-			reg = <0xc0003000 0x44>;
+			reg = <0xf0003000 0x44>;
 			interrupts = <7>, <8>;
 			interrupt-names = "rx", "tx";
 		};
diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig
deleted file mode 100644
index 49c9301..0000000
--- a/arch/arc/configs/fpga_noramfs_defconfig
+++ /dev/null
@@ -1,63 +0,0 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_FPGA_LEGACY=y
-# CONFIG_ARC_HAS_RTSC is not set
-CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
-CONFIG_PREEMPT=y
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-CONFIG_ARC_EMAC=y
-CONFIG_LXT_PHY=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_ARC=y
-CONFIG_SERIAL_ARC_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/nsim_700_defconfig
similarity index 100%
rename from arch/arc/configs/fpga_defconfig
rename to arch/arc/configs/nsim_700_defconfig
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index 742816f..27ecc69 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -41,6 +41,15 @@
 
 /******************************************************************
  * IRQ Control Macros
+ *
+ * All of them have "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ * 	Orig Bug + Rejected solution	: https://lkml.org/lkml/2013/3/29/67
+ * 	Reasoning			: https://lkml.org/lkml/2013/4/8/15
+ *
  ******************************************************************/
 
 /*
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index d01df0c..20ebb60 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -26,8 +26,10 @@
 #include <asm/setup.h>
 #include <asm/mach_desc.h>
 
+#ifndef CONFIG_ARC_HAS_LLSC
 arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+#endif
 
 struct plat_smp_ops  plat_smp_ops;
 
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 51b373f..4eb540b 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1942,4 +1942,48 @@
 			 <&tegra_car TEGRA124_CLK_EXTERN1>;
 		clock-names = "pll_a", "pll_a_out0", "mclk";
 	};
+
+	thermal-zones {
+		cpu {
+			trips {
+				trip@0 {
+					temperature = <101000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/* There are currently no cooling maps because there are no cooling devices */
+			};
+		};
+
+		mem {
+			trips {
+				trip@0 {
+					temperature = <101000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/* There are currently no cooling maps because there are no cooling devices */
+			};
+		};
+
+		gpu {
+			trips {
+				trip@0 {
+					temperature = <101000>;
+					hysteresis = <0>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				/* There are currently no cooling maps because there are no cooling devices */
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 3ad2e3c..4be06c6 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -4,6 +4,7 @@
 #include <dt-bindings/pinctrl/pinctrl-tegra.h>
 #include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/tegra124-soctherm.h>
 
 #include "skeleton.dtsi"
 
@@ -657,6 +658,18 @@
 		status = "disabled";
 	};
 
+	soctherm: thermal-sensor@0,700e2000 {
+		compatible = "nvidia,tegra124-soctherm";
+		reg = <0x0 0x700e2000 0x0 0x1000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&tegra_car TEGRA124_CLK_TSENSOR>,
+			<&tegra_car TEGRA124_CLK_SOC_THERM>;
+		clock-names = "tsensor", "soctherm";
+		resets = <&tegra_car 78>;
+		reset-names = "soctherm";
+		#thermal-sensor-cells = <1>;
+	};
+
 	ahub@0,70300000 {
 		compatible = "nvidia,tegra124-ahub";
 		reg = <0x0 0x70300000 0x0 0x200>,
@@ -898,6 +911,40 @@
 		};
 	};
 
+	thermal-zones {
+		cpu {
+			polling-delay-passive = <1000>;
+			polling-delay = <1000>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+		};
+
+		mem {
+			polling-delay-passive = <1000>;
+			polling-delay = <1000>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
+		};
+
+		gpu {
+			polling-delay-passive = <1000>;
+			polling-delay = <1000>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_GPU>;
+		};
+
+		pllx {
+			polling-delay-passive = <1000>;
+			polling-delay = <1000>;
+
+			thermal-sensors =
+				<&soctherm TEGRA124_SOCTHERM_SENSOR_PLLX>;
+		};
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <GIC_PPI 13
diff --git a/arch/arm/configs/ape6evm_defconfig b/arch/arm/configs/ape6evm_defconfig
index db81d8c..9e9a72e 100644
--- a/arch/arm/configs/ape6evm_defconfig
+++ b/arch/arm/configs/ape6evm_defconfig
@@ -33,7 +33,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_BINFMT_MISC=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index d9675c6..5666e37 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -43,7 +43,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 83a87e4..7117662 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -39,7 +39,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=y
diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig
index 1dde5da..3125e00 100644
--- a/arch/arm/configs/bockw_defconfig
+++ b/arch/arm/configs/bockw_defconfig
@@ -29,7 +29,7 @@
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_VFP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 759f9b0..235842c 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -49,7 +49,7 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_ONDEMAND=m
 CONFIG_CPU_IDLE=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index c419907..5ef14de 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -27,7 +27,7 @@
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index eb440aa..ea316c4 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -39,7 +39,6 @@
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
 CONFIG_APM_EMULATION=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index 1fe3621f..1125436 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -18,7 +18,7 @@
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_NEON=y
 CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 182e546..18e59fe 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -31,7 +31,6 @@
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
 CONFIG_APM_EMULATION=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f707cd2..7c2075a 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -54,7 +54,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TEST_SUSPEND=y
 CONFIG_NET=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 20a3ff9..a2067cb 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -30,7 +30,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig
index 8cb115d..5d63fc5 100644
--- a/arch/arm/configs/kzm9g_defconfig
+++ b/arch/arm/configs/kzm9g_defconfig
@@ -43,7 +43,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/lager_defconfig b/arch/arm/configs/lager_defconfig
index 929c571..a82afc9 100644
--- a/arch/arm/configs/lager_defconfig
+++ b/arch/arm/configs/lager_defconfig
@@ -37,7 +37,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/mackerel_defconfig b/arch/arm/configs/mackerel_defconfig
index 57ececb..05a5293 100644
--- a/arch/arm/configs/mackerel_defconfig
+++ b/arch/arm/configs/mackerel_defconfig
@@ -28,7 +28,6 @@
 CONFIG_VFP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_PM=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index ff91630..3c8b6d8 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -33,7 +33,7 @@
 CONFIG_VFP=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 115cda9..a7dce67 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -63,7 +63,6 @@
 CONFIG_BINFMT_MISC=y
 CONFIG_PM=y
 # CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
index 23591db..f610230 100644
--- a/arch/arm/configs/prima2_defconfig
+++ b/arch/arm/configs/prima2_defconfig
@@ -18,7 +18,7 @@
 CONFIG_AEABI=y
 CONFIG_KEXEC=y
 CONFIG_BINFMT_MISC=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index b58fb32..afa2479 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -32,7 +32,7 @@
 CONFIG_NEON=y
 CONFIG_KERNEL_MODE_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_ADVANCED_DEBUG=y
 CONFIG_NET=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index df2c0f5..3df6ca0 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -39,7 +39,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index f7ac037..7a342d2 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -11,7 +11,7 @@
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 40750f9..3ea9c33 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -46,7 +46,7 @@
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index d219d6a..6a1c989 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -25,7 +25,7 @@
 CONFIG_ARM_U8500_CPUIDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig b/arch/arm/configs/vt8500_v6_v7_defconfig
index 9e7a256..1bfaa7b 100644
--- a/arch/arm/configs/vt8500_v6_v7_defconfig
+++ b/arch/arm/configs/vt8500_v6_v7_defconfig
@@ -16,7 +16,7 @@
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index b9db269..66ce176 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -33,6 +33,11 @@
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 53036e2..254e065 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,8 +150,6 @@
 	u32 halt_wakeup;
 };
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index acb0d57..63e0ecc 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -52,6 +52,7 @@
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
 
+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -161,9 +162,10 @@
 }
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size)
+					     unsigned long size,
+					     bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu))
+	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
 		kvm_flush_dcache_to_poc((void *)hva, size);
 	
 	/*
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e34934f..f7c65ad 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -484,7 +484,7 @@
 	armpmu->stop(armpmu);
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int armpmu_runtime_resume(struct device *dev)
 {
 	struct arm_pmu_platdata *plat = dev_get_platdata(dev);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9e193c8..2d6d910 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -213,6 +213,11 @@
 	int err;
 	struct kvm_vcpu *vcpu;
 
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!vcpu) {
 		err = -ENOMEM;
@@ -263,6 +268,7 @@
 {
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu->arch.target = -1;
+	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
@@ -419,6 +425,7 @@
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	int ret;
 
 	if (likely(vcpu->arch.has_run_once))
@@ -427,15 +434,23 @@
 	vcpu->arch.has_run_once = true;
 
 	/*
-	 * Initialize the VGIC before running a vcpu the first time on
-	 * this VM.
+	 * Map the VGIC hardware resources before running a vcpu the first
+	 * time on this VM.
 	 */
-	if (unlikely(!vgic_initialized(vcpu->kvm))) {
-		ret = kvm_vgic_init(vcpu->kvm);
+	if (unlikely(!vgic_ready(kvm))) {
+		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * Enable the arch timers only if we have an in-kernel VGIC
+	 * and it has been properly initialized, since we cannot handle
+	 * interrupts from the virtual timer with a userspace gic.
+	 */
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		kvm_timer_enable(kvm);
+
 	return 0;
 }
 
@@ -649,6 +664,48 @@
 	return -EINVAL;
 }
 
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+			       const struct kvm_vcpu_init *init)
+{
+	unsigned int i;
+	int phys_target = kvm_target_cpu();
+
+	if (init->target != phys_target)
+		return -EINVAL;
+
+	/*
+	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+	 * use the same target.
+	 */
+	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+		return -EINVAL;
+
+	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+	for (i = 0; i < sizeof(init->features) * 8; i++) {
+		bool set = (init->features[i / 32] & (1 << (i % 32)));
+
+		if (set && i >= KVM_VCPU_MAX_FEATURES)
+			return -ENOENT;
+
+		/*
+		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+		 * use the same feature set.
+		 */
+		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+		    test_bit(i, vcpu->arch.features) != set)
+			return -EINVAL;
+
+		if (set)
+			set_bit(i, vcpu->arch.features);
+	}
+
+	vcpu->arch.target = phys_target;
+
+	/* Now we know what it is, we can reset it. */
+	return kvm_reset_vcpu(vcpu);
+}
+
+
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
 {
@@ -659,10 +716,21 @@
 		return ret;
 
 	/*
+	 * Ensure a rebooted VM will fault in RAM pages and detect if the
+	 * guest MMU is turned off and flush the caches as needed.
+	 */
+	if (vcpu->arch.has_run_once)
+		stage2_unmap_vm(vcpu->kvm);
+
+	vcpu_reset_hcr(vcpu);
+
+	/*
 	 * Handle the "start in power-off" case by marking the VCPU as paused.
 	 */
-	if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
 		vcpu->arch.pause = true;
+	else
+		vcpu->arch.pause = false;
 
 	return 0;
 }
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index cc0b787..384bab6 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -38,7 +38,6 @@
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr = HCR_GUEST_MASK;
 	return 0;
 }
 
@@ -274,31 +273,6 @@
 	}
 }
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init)
-{
-	unsigned int i;
-
-	/* We can only cope with guest==host and only on A15/A7 (for now). */
-	if (init->target != kvm_target_cpu())
-		return -EINVAL;
-
-	vcpu->arch.target = init->target;
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-	for (i = 0; i < sizeof(init->features) * 8; i++) {
-		if (test_bit(i, (void *)init->features)) {
-			if (i >= KVM_VCPU_MAX_FEATURES)
-				return -ENOENT;
-			set_bit(i, vcpu->arch.features);
-		}
-	}
-
-	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
-
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	int target = kvm_target_cpu();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 4cb5a93..5d3bfc0 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -187,15 +187,18 @@
 	}
 
 	rt = vcpu->arch.mmio_decode.rt;
-	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
 
-	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
-					 KVM_TRACE_MMIO_READ_UNSATISFIED,
-			mmio.len, fault_ipa,
-			(mmio.is_write) ? data : 0);
+	if (mmio.is_write) {
+		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
+					       mmio.len);
 
-	if (mmio.is_write)
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
+			       fault_ipa, data);
 		mmio_write_buf(mmio.data, mmio.len, data);
+	} else {
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+			       fault_ipa, 0);
+	}
 
 	if (vgic_handle_mmio(vcpu, run, &mmio))
 		return 1;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8664ff1..1dc9778 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -612,6 +612,71 @@
 	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
+static void stage2_unmap_memslot(struct kvm *kvm,
+				 struct kvm_memory_slot *memslot)
+{
+	hva_t hva = memslot->userspace_addr;
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = PAGE_SIZE * memslot->npages;
+	hva_t reg_end = hva + size;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we should
+	 * unmap any of them.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 *     |               memory region                |
+	 *     +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (!(vma->vm_flags & VM_PFNMAP)) {
+			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		stage2_unmap_memslot(kvm, memslot);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm:	The KVM struct pointer for the VM.
@@ -853,6 +918,7 @@
 	struct vm_area_struct *vma;
 	pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
+	bool fault_ipa_uncached;
 
 	write_fault = kvm_is_write_fault(vcpu);
 	if (fault_status == FSC_PERM && !write_fault) {
@@ -919,6 +985,8 @@
 	if (!hugetlb && !force_pte)
 		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
+	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
+
 	if (hugetlb) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
@@ -926,7 +994,8 @@
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -934,7 +1003,8 @@
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1294,11 +1364,12 @@
 		hva = vm_end;
 	} while (hva < reg_end);
 
-	if (ret) {
-		spin_lock(&kvm->mmu_lock);
+	spin_lock(&kvm->mmu_lock);
+	if (ret)
 		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-		spin_unlock(&kvm->mmu_lock);
-	}
+	else
+		stage2_flush_memslot(kvm, memslot);
+	spin_unlock(&kvm->mmu_lock);
 	return ret;
 }
 
@@ -1310,6 +1381,15 @@
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
+	/*
+	 * Readonly memslots are not incoherent with the caches by definition,
+	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
+	 * that the guest may consider devices and hence map as uncached.
+	 * To prevent incoherency issues in these cases, tag all readonly
+	 * regions as incoherent.
+	 */
+	if (slot->flags & KVM_MEM_READONLY)
+		slot->flags |= KVM_MEMSLOT_INCOHERENT;
 	return 0;
 }
 
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 09cf377..58cb324 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/preempt.h>
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
@@ -166,6 +167,23 @@
 
 static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
 {
+	int i;
+	struct kvm_vcpu *tmp;
+
+	/*
+	 * The KVM ABI specifies that a system event exit may call KVM_RUN
+	 * again and may perform shutdown/reboot at a later time than when the
+	 * actual request is made.  Since we are implementing PSCI and a
+	 * caller of PSCI reboot and shutdown expects that the system shuts
+	 * down or reboots immediately, let's make sure that VCPUs are not run
+	 * after this call is handled and before the VCPUs have been
+	 * re-initialized.
+	 */
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+		tmp->arch.pause = true;
+		kvm_vcpu_kick(tmp);
+	}
+
 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
 	vcpu->run->system_event.type = type;
 	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
index 6b98413..641edc3 100644
--- a/arch/arm/mach-davinci/pm_domain.c
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -14,7 +14,7 @@
 #include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int davinci_pm_runtime_suspend(struct device *dev)
 {
 	int ret;
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e4a00ba..603820e 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -21,7 +21,7 @@
 	select HAVE_S3C_RTC if RTC_CLASS
 	select PINCTRL
 	select PINCTRL_EXYNOS
-	select PM_GENERIC_DOMAINS if PM_RUNTIME
+	select PM_GENERIC_DOMAINS if PM
 	select S5P_DEV_MFC
 	select SRAM
 	select MFD_SYSCON
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index ca79dda..ef6041e 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -19,7 +19,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int keystone_pm_runtime_suspend(struct device *dev)
 {
 	int ret;
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 3f2d396..c40e209 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -21,7 +21,7 @@
 
 #include "soc.h"
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int omap1_pm_runtime_suspend(struct device *dev)
 {
 	int ret;
@@ -59,7 +59,7 @@
 #define OMAP1_PM_DOMAIN (&default_pm_domain)
 #else
 #define OMAP1_PM_DOMAIN NULL
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
 
 static struct pm_clk_notifier_block platform_bus_notifier = {
 	.pm_domain = OMAP1_PM_DOMAIN,
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index f0edec1..6ab656c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -15,7 +15,7 @@
 	select ARM_CPU_SUSPEND if PM
 	select OMAP_INTERCONNECT
 	select PM_OPP if PM
-	select PM_RUNTIME if CPU_IDLE
+	select PM if CPU_IDLE
 	select SOC_HAS_OMAP2_SDRC
 
 config ARCH_OMAP4
@@ -32,7 +32,7 @@
 	select PL310_ERRATA_588369 if CACHE_L2X0
 	select PL310_ERRATA_727915 if CACHE_L2X0
 	select PM_OPP if PM
-	select PM_RUNTIME if CPU_IDLE
+	select PM if CPU_IDLE
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_775420
 
@@ -103,7 +103,7 @@
 	select I2C_OMAP
 	select MENELAUS if ARCH_OMAP2
 	select NEON if CPU_V7
-	select PM_RUNTIME
+	select PM
 	select REGULATOR
 	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 4fc8383..a1bd6af 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -361,7 +361,7 @@
 	u8 postsetup_state;
 
 	/* Set the default postsetup state for all hwmods */
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 	postsetup_state = _HWMOD_STATE_IDLE;
 #else
 	postsetup_state = _HWMOD_STATE_ENABLED;
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 8c58b71..be9541e 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -588,7 +588,7 @@
 	return ERR_PTR(ret);
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int _od_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5674a55..8127e45 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -38,6 +38,11 @@
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2012c4b..0b7dfdb 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -165,8 +165,6 @@
 	u32 halt_wakeup;
 };
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
@@ -200,6 +198,7 @@
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
 u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
 
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0caf7a5..14a74f1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -83,6 +83,7 @@
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
 
+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -243,9 +244,10 @@
 }
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size)
+					     unsigned long size,
+					     bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu))
+	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
 		kvm_flush_dcache_to_poc((void *)hva, size);
 
 	if (!icache_is_aliasing()) {		/* PIPT */
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 7679469..9535bd5 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -38,7 +38,6 @@
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
 	return 0;
 }
 
@@ -297,31 +296,6 @@
 	return -EINVAL;
 }
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init)
-{
-	unsigned int i;
-	int phys_target = kvm_target_cpu();
-
-	if (init->target != phys_target)
-		return -EINVAL;
-
-	vcpu->arch.target = phys_target;
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-	for (i = 0; i < sizeof(init->features) * 8; i++) {
-		if (init->features[i / 32] & (1 << (i % 32))) {
-			if (i >= KVM_VCPU_MAX_FEATURES)
-				return -ENOENT;
-			set_bit(i, vcpu->arch.features);
-		}
-	}
-
-	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
-
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	int target = kvm_target_cpu();
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index b0a608d..b964c66 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -30,8 +30,7 @@
 /* Copy to userspace.  This is based on the memcpy used for
    kernel-to-kernel copying; see "string.c".  */
 
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -187,13 +186,14 @@
 
   return retn;
 }
+EXPORT_SYMBOL(__copy_user);
 
 /* Copy from user to kernel, zeroing the bytes that were inaccessible in
    userland.  The return-value is the number of bytes that were
    inaccessible.  */
 
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+				  unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -369,11 +369,10 @@
 
   return retn + n;
 }
+EXPORT_SYMBOL(__copy_user_zeroing);
 
 /* Zero userspace.  */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -521,3 +520,4 @@
 
   return retn;
 }
+EXPORT_SYMBOL(__do_clear_user);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 15a9ed1..4fc16b4 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -108,6 +108,7 @@
 	select MTD_JEDECPROBE
 	select MTD_BLOCK
 	select MTD_COMPLEX_MAPPINGS
+	select MTD_MTDRAM
 	help
 	  This option enables MTD mapping of flash devices.  Needed to use
 	  flash memories.  If unsure, say Y.
@@ -358,13 +359,6 @@
 	default MMC
 	select SPI
 	select MMC_SPI
-	select ETRAX_SPI_MMC_BOARD
-
-# For the parts that can't be a module (due to restrictions in
-# framework elsewhere).
-config ETRAX_SPI_MMC_BOARD
-       boolean
-       default n
 
 # While the board info is MMC_SPI only, the drivers are written to be
 # independent of MMC_SPI, so we'll keep SPI non-dependent on the
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
index 39aa3c1..15fbfef 100644
--- a/arch/cris/arch-v32/drivers/Makefile
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -10,4 +10,3 @@
 obj-$(CONFIG_ETRAX_I2C)			+= i2c.o
 obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL)	+= sync_serial.o
 obj-$(CONFIG_PCI)			+= pci/
-obj-$(CONFIG_ETRAX_SPI_MMC_BOARD)	+= board_mmcspi.o
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
index c073cf4..d9cc856 100644
--- a/arch/cris/arch-v32/drivers/i2c.h
+++ b/arch/cris/arch-v32/drivers/i2c.h
@@ -2,7 +2,6 @@
 #include <linux/init.h>
 
 /* High level I2C actions */
-int __init i2c_init(void);
 int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
 int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
 int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 5a14913..08a313f 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1,8 +1,7 @@
 /*
- * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
+ * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
  *
- * Copyright (c) 2005 Axis Communications AB
- *
+ * Copyright (c) 2005, 2008 Axis Communications AB
  * Author: Mikael Starvik
  *
  */
@@ -16,16 +15,17 @@
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
 #include <linux/wait.h>
 
 #include <asm/io.h>
-#include <dma.h>
+#include <mach/dma.h>
 #include <pinmux.h>
 #include <hwregs/reg_rdwr.h>
 #include <hwregs/sser_defs.h>
+#include <hwregs/timer_defs.h>
 #include <hwregs/dma_defs.h>
 #include <hwregs/dma.h>
 #include <hwregs/intr_vect_defs.h>
@@ -59,22 +59,23 @@
 /* the rest of the data pointed out by Descr1 and set readp to the start */
 /* of Descr2                                                             */
 
-#define SYNC_SERIAL_MAJOR 125
-
 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
 /* words can be handled */
-#define IN_BUFFER_SIZE 12288
-#define IN_DESCR_SIZE 256
-#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
+#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
+#define NBR_IN_DESCR (8*6)
+#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
 
-#define OUT_BUFFER_SIZE 1024*8
 #define NBR_OUT_DESCR 8
+#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
 
 #define DEFAULT_FRAME_RATE 0
 #define DEFAULT_WORD_RATE 7
 
+/* To be removed when we move to pure udev. */
+#define SYNC_SERIAL_MAJOR 125
+
 /* NOTE: Enabling some debug will likely cause overrun or underrun,
- * especially if manual mode is use.
+ * especially if manual mode is used.
  */
 #define DEBUG(x)
 #define DEBUGREAD(x)
@@ -85,11 +86,28 @@
 #define DEBUGTRDMA(x)
 #define DEBUGOUTBUF(x)
 
-typedef struct sync_port
-{
-	reg_scope_instances regi_sser;
-	reg_scope_instances regi_dmain;
-	reg_scope_instances regi_dmaout;
+enum syncser_irq_setup {
+	no_irq_setup = 0,
+	dma_irq_setup = 1,
+	manual_irq_setup = 2,
+};
+
+struct sync_port {
+	unsigned long regi_sser;
+	unsigned long regi_dmain;
+	unsigned long regi_dmaout;
+
+	/* Interrupt vectors. */
+	unsigned long dma_in_intr_vect; /* Used for DMA in. */
+	unsigned long dma_out_intr_vect; /* Used for DMA out. */
+	unsigned long syncser_intr_vect; /* Used when no DMA. */
+
+	/* DMA number for in and out. */
+	unsigned int dma_in_nbr;
+	unsigned int dma_out_nbr;
+
+	/* DMA owner. */
+	enum dma_owner req_dma;
 
 	char started; /* 1 if port has been started */
 	char port_nbr; /* Port 0 or 1 */
@@ -99,22 +117,29 @@
 	char use_dma;  /* 1 if port uses dma */
 	char tr_running;
 
-	char init_irqs;
+	enum syncser_irq_setup init_irqs;
 	int output;
 	int input;
 
 	/* Next byte to be read by application */
-	volatile unsigned char *volatile readp;
+	unsigned char *readp;
 	/* Next byte to be written by etrax */
-	volatile unsigned char *volatile writep;
+	unsigned char *writep;
 
 	unsigned int in_buffer_size;
+	unsigned int in_buffer_len;
 	unsigned int inbufchunk;
-	unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
-	unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
-	unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
-	struct dma_descr_data* next_rx_desc;
-	struct dma_descr_data* prev_rx_desc;
+	/* Data buffers for in and output. */
+	unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
+	unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
+	unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
+	struct timespec timestamp[NBR_IN_DESCR];
+	struct dma_descr_data *next_rx_desc;
+	struct dma_descr_data *prev_rx_desc;
+
+	struct timeval last_timestamp;
+	int read_ts_idx;
+	int write_ts_idx;
 
 	/* Pointer to the first available descriptor in the ring,
 	 * unless active_tr_descr == catch_tr_descr and a dma
@@ -135,114 +160,138 @@
 	/* Number of bytes currently locked for being read by DMA */
 	int out_buf_count;
 
-	dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
-	dma_descr_context in_context __attribute__ ((__aligned__(32)));
-	dma_descr_data out_descr[NBR_OUT_DESCR]
-		__attribute__ ((__aligned__(16)));
-	dma_descr_context out_context __attribute__ ((__aligned__(32)));
+	dma_descr_context in_context __aligned(32);
+	dma_descr_context out_context __aligned(32);
+	dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
+	dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
+
 	wait_queue_head_t out_wait_q;
 	wait_queue_head_t in_wait_q;
 
 	spinlock_t lock;
-} sync_port;
+};
 
 static DEFINE_MUTEX(sync_serial_mutex);
 static int etrax_sync_serial_init(void);
 static void initialize_port(int portnbr);
 static inline int sync_data_avail(struct sync_port *port);
 
-static int sync_serial_open(struct inode *, struct file*);
-static int sync_serial_release(struct inode*, struct file*);
+static int sync_serial_open(struct inode *, struct file *);
+static int sync_serial_release(struct inode *, struct file *);
 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
 
-static int sync_serial_ioctl(struct file *,
-			     unsigned int cmd, unsigned long arg);
-static ssize_t sync_serial_write(struct file * file, const char * buf,
+static long sync_serial_ioctl(struct file *file,
+			      unsigned int cmd, unsigned long arg);
+static int sync_serial_ioctl_unlocked(struct file *file,
+				      unsigned int cmd, unsigned long arg);
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
 				 size_t count, loff_t *ppos);
-static ssize_t sync_serial_read(struct file *file, char *buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos);
 
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
-     defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
-    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
-     defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
+#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
+	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
+	(defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
+	defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
 #define SYNC_SER_DMA
+#else
+#define SYNC_SER_MANUAL
 #endif
 
-static void send_word(sync_port* port);
-static void start_dma_out(struct sync_port *port, const char *data, int count);
-static void start_dma_in(sync_port* port);
 #ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count);
+static void start_dma_in(struct sync_port *port);
 static irqreturn_t tr_interrupt(int irq, void *dev_id);
 static irqreturn_t rx_interrupt(int irq, void *dev_id);
 #endif
-
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
-     !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
-    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
-     !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
-#define SYNC_SER_MANUAL
-#endif
 #ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port);
 static irqreturn_t manual_interrupt(int irq, void *dev_id);
 #endif
 
-#ifdef CONFIG_ETRAXFS	/* ETRAX FS */
-#define OUT_DMA_NBR 4
-#define IN_DMA_NBR 5
-#define PINMUX_SSER pinmux_sser0
-#define SYNCSER_INST regi_sser0
-#define SYNCSER_INTR_VECT SSER0_INTR_VECT
-#define OUT_DMA_INST regi_dma4
-#define IN_DMA_INST regi_dma5
-#define DMA_OUT_INTR_VECT DMA4_INTR_VECT
-#define DMA_IN_INTR_VECT DMA5_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser0
-#else			/* Artpec-3 */
-#define OUT_DMA_NBR 6
-#define IN_DMA_NBR 7
-#define PINMUX_SSER pinmux_sser
-#define SYNCSER_INST regi_sser
-#define SYNCSER_INTR_VECT SSER_INTR_VECT
-#define OUT_DMA_INST regi_dma6
-#define IN_DMA_INST regi_dma7
-#define DMA_OUT_INTR_VECT DMA6_INTR_VECT
-#define DMA_IN_INTR_VECT DMA7_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser
+#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
+#define artpec_request_dma crisv32_request_dma
+#define artpec_free_dma crisv32_free_dma
+
+#ifdef CONFIG_ETRAXFS
+/* ETRAX FS */
+#define DMA_OUT_NBR0		SYNC_SER0_TX_DMA_NBR
+#define DMA_IN_NBR0		SYNC_SER0_RX_DMA_NBR
+#define DMA_OUT_NBR1		SYNC_SER1_TX_DMA_NBR
+#define DMA_IN_NBR1		SYNC_SER1_RX_DMA_NBR
+#define PINMUX_SSER0		pinmux_sser0
+#define PINMUX_SSER1		pinmux_sser1
+#define SYNCSER_INST0		regi_sser0
+#define SYNCSER_INST1		regi_sser1
+#define SYNCSER_INTR_VECT0	SSER0_INTR_VECT
+#define SYNCSER_INTR_VECT1	SSER1_INTR_VECT
+#define OUT_DMA_INST0		regi_dma4
+#define IN_DMA_INST0		regi_dma5
+#define DMA_OUT_INTR_VECT0	DMA4_INTR_VECT
+#define DMA_OUT_INTR_VECT1	DMA7_INTR_VECT
+#define DMA_IN_INTR_VECT0	DMA5_INTR_VECT
+#define DMA_IN_INTR_VECT1	DMA6_INTR_VECT
+#define REQ_DMA_SYNCSER0	dma_sser0
+#define REQ_DMA_SYNCSER1	dma_sser1
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
+#define PORT1_DMA 1
+#else
+#define PORT1_DMA 0
+#endif
+#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
+/* ARTPEC-3 */
+#define DMA_OUT_NBR0		SYNC_SER_TX_DMA_NBR
+#define DMA_IN_NBR0		SYNC_SER_RX_DMA_NBR
+#define PINMUX_SSER0		pinmux_sser
+#define SYNCSER_INST0		regi_sser
+#define SYNCSER_INTR_VECT0	SSER_INTR_VECT
+#define OUT_DMA_INST0		regi_dma6
+#define IN_DMA_INST0		regi_dma7
+#define DMA_OUT_INTR_VECT0	DMA6_INTR_VECT
+#define DMA_IN_INTR_VECT0	DMA7_INTR_VECT
+#define REQ_DMA_SYNCSER0	dma_sser
+#define REQ_DMA_SYNCSER1	dma_sser
+#endif
+
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
+#define PORT0_DMA 1
+#else
+#define PORT0_DMA 0
 #endif
 
 /* The ports */
-static struct sync_port ports[]=
-{
+static struct sync_port ports[] = {
 	{
-		.regi_sser             = SYNCSER_INST,
-		.regi_dmaout           = OUT_DMA_INST,
-		.regi_dmain            = IN_DMA_INST,
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
-                .use_dma               = 1,
-#else
-                .use_dma               = 0,
-#endif
-	}
+		.regi_sser		= SYNCSER_INST0,
+		.regi_dmaout		= OUT_DMA_INST0,
+		.regi_dmain		= IN_DMA_INST0,
+		.use_dma		= PORT0_DMA,
+		.dma_in_intr_vect	= DMA_IN_INTR_VECT0,
+		.dma_out_intr_vect	= DMA_OUT_INTR_VECT0,
+		.dma_in_nbr		= DMA_IN_NBR0,
+		.dma_out_nbr		= DMA_OUT_NBR0,
+		.req_dma		= REQ_DMA_SYNCSER0,
+		.syncser_intr_vect	= SYNCSER_INTR_VECT0,
+	},
 #ifdef CONFIG_ETRAXFS
-	,
-
 	{
-		.regi_sser             = regi_sser1,
-		.regi_dmaout           = regi_dma6,
-		.regi_dmain            = regi_dma7,
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
-                .use_dma               = 1,
-#else
-                .use_dma               = 0,
-#endif
-	}
+		.regi_sser		= SYNCSER_INST1,
+		.regi_dmaout		= regi_dma6,
+		.regi_dmain		= regi_dma7,
+		.use_dma		= PORT1_DMA,
+		.dma_in_intr_vect	= DMA_IN_INTR_VECT1,
+		.dma_out_intr_vect	= DMA_OUT_INTR_VECT1,
+		.dma_in_nbr		= DMA_IN_NBR1,
+		.dma_out_nbr		= DMA_OUT_NBR1,
+		.req_dma		= REQ_DMA_SYNCSER1,
+		.syncser_intr_vect	= SYNCSER_INTR_VECT1,
+	},
 #endif
 };
 
 #define NBR_PORTS ARRAY_SIZE(ports)
 
-static const struct file_operations sync_serial_fops = {
+static const struct file_operations syncser_fops = {
 	.owner		= THIS_MODULE,
 	.write		= sync_serial_write,
 	.read		= sync_serial_read,
@@ -253,61 +302,40 @@
 	.llseek		= noop_llseek,
 };
 
-static int __init etrax_sync_serial_init(void)
+static dev_t syncser_first;
+static int minor_count = NBR_PORTS;
+#define SYNCSER_NAME "syncser"
+static struct cdev *syncser_cdev;
+static struct class *syncser_class;
+
+static void sync_serial_start_port(struct sync_port *port)
 {
-	ports[0].enabled = 0;
-#ifdef CONFIG_ETRAXFS
-	ports[1].enabled = 0;
-#endif
-	if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
-			&sync_serial_fops) < 0) {
-		printk(KERN_WARNING
-			"Unable to get major for synchronous serial port\n");
-		return -EBUSY;
-	}
-
-	/* Initialize Ports */
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
-	if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
-		printk(KERN_WARNING
-			"Unable to alloc pins for synchronous serial port 0\n");
-		return -EIO;
-	}
-	ports[0].enabled = 1;
-	initialize_port(0);
-#endif
-
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
-	if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
-		printk(KERN_WARNING
-			"Unable to alloc pins for synchronous serial port 0\n");
-		return -EIO;
-	}
-	ports[1].enabled = 1;
-	initialize_port(1);
-#endif
-
-#ifdef CONFIG_ETRAXFS
-	printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
-#else
-	printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
-#endif
-	return 0;
+	reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
+	reg_sser_rw_tr_cfg tr_cfg =
+		REG_RD(sser, port->regi_sser, rw_tr_cfg);
+	reg_sser_rw_rec_cfg rec_cfg =
+		REG_RD(sser, port->regi_sser, rw_rec_cfg);
+	cfg.en = regk_sser_yes;
+	tr_cfg.tr_en = regk_sser_yes;
+	rec_cfg.rec_en = regk_sser_yes;
+	REG_WR(sser, port->regi_sser, rw_cfg, cfg);
+	REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
+	port->started = 1;
 }
 
 static void __init initialize_port(int portnbr)
 {
-	int __attribute__((unused)) i;
 	struct sync_port *port = &ports[portnbr];
-	reg_sser_rw_cfg cfg = {0};
-	reg_sser_rw_frm_cfg frm_cfg = {0};
-	reg_sser_rw_tr_cfg tr_cfg = {0};
-	reg_sser_rw_rec_cfg rec_cfg = {0};
+	reg_sser_rw_cfg cfg = { 0 };
+	reg_sser_rw_frm_cfg frm_cfg = { 0 };
+	reg_sser_rw_tr_cfg tr_cfg = { 0 };
+	reg_sser_rw_rec_cfg rec_cfg = { 0 };
 
-	DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
+	DEBUG(pr_info("Init sync serial port %d\n", portnbr));
 
 	port->port_nbr = portnbr;
-	port->init_irqs = 1;
+	port->init_irqs = no_irq_setup;
 
 	port->out_rd_ptr = port->out_buffer;
 	port->out_buf_count = 0;
@@ -318,10 +346,11 @@
 	port->readp = port->flip;
 	port->writep = port->flip;
 	port->in_buffer_size = IN_BUFFER_SIZE;
+	port->in_buffer_len = 0;
 	port->inbufchunk = IN_DESCR_SIZE;
-	port->next_rx_desc = &port->in_descr[0];
-	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
-	port->prev_rx_desc->eol = 1;
+
+	port->read_ts_idx = 0;
+	port->write_ts_idx = 0;
 
 	init_waitqueue_head(&port->out_wait_q);
 	init_waitqueue_head(&port->in_wait_q);
@@ -368,14 +397,18 @@
 	REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
 
 #ifdef SYNC_SER_DMA
-	/* Setup the descriptor ring for dma out/transmit. */
-	for (i = 0; i < NBR_OUT_DESCR; i++) {
-		port->out_descr[i].wait = 0;
-		port->out_descr[i].intr = 1;
-		port->out_descr[i].eol = 0;
-		port->out_descr[i].out_eop = 0;
-		port->out_descr[i].next =
-			(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
+	{
+		int i;
+		/* Setup the descriptor ring for dma out/transmit. */
+		for (i = 0; i < NBR_OUT_DESCR; i++) {
+			dma_descr_data *descr = &port->out_descr[i];
+			descr->wait = 0;
+			descr->intr = 1;
+			descr->eol = 0;
+			descr->out_eop = 0;
+			descr->next =
+				(dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
+		}
 	}
 
 	/* Create a ring from the list. */
@@ -391,201 +424,116 @@
 
 static inline int sync_data_avail(struct sync_port *port)
 {
-	int avail;
-	unsigned char *start;
-	unsigned char *end;
-
-	start = (unsigned char*)port->readp; /* cast away volatile */
-	end = (unsigned char*)port->writep;  /* cast away volatile */
-	/* 0123456789  0123456789
-	 *  -----      -    -----
-	 *  ^rp  ^wp    ^wp ^rp
-	 */
-
-	if (end >= start)
-		avail = end - start;
-	else
-		avail = port->in_buffer_size - (start - end);
-	return avail;
-}
-
-static inline int sync_data_avail_to_end(struct sync_port *port)
-{
-	int avail;
-	unsigned char *start;
-	unsigned char *end;
-
-	start = (unsigned char*)port->readp; /* cast away volatile */
-	end = (unsigned char*)port->writep;  /* cast away volatile */
-	/* 0123456789  0123456789
-	 *  -----           -----
-	 *  ^rp  ^wp    ^wp ^rp
-	 */
-
-	if (end >= start)
-		avail = end - start;
-	else
-		avail = port->flip + port->in_buffer_size - start;
-	return avail;
+	return port->in_buffer_len;
 }
 
 static int sync_serial_open(struct inode *inode, struct file *file)
 {
+	int ret = 0;
 	int dev = iminor(inode);
-	int ret = -EBUSY;
-	sync_port *port;
-	reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
-	reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
+	struct sync_port *port;
+#ifdef SYNC_SER_DMA
+	reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
+	reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
+#endif
 
-	mutex_lock(&sync_serial_mutex);
-	DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
+	DEBUG(pr_debug("Open sync serial port %d\n", dev));
 
-	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-	{
-		DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
-		ret = -ENODEV;
-		goto out;
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
+		return -ENODEV;
 	}
 	port = &ports[dev];
 	/* Allow open this device twice (assuming one reader and one writer) */
-	if (port->busy == 2)
-	{
-		DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
-		goto out;
+	if (port->busy == 2) {
+		DEBUG(pr_info("syncser%d is busy\n", dev));
+		return -EBUSY;
 	}
 
+	mutex_lock(&sync_serial_mutex);
 
-	if (port->init_irqs) {
-		if (port->use_dma) {
-			if (port == &ports[0]) {
-#ifdef SYNC_SER_DMA
-				if (request_irq(DMA_OUT_INTR_VECT,
-						tr_interrupt,
-						0,
-						"synchronous serial 0 dma tr",
-						&ports[0])) {
-					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
-					goto out;
-				} else if (request_irq(DMA_IN_INTR_VECT,
-						rx_interrupt,
-						0,
-						"synchronous serial 1 dma rx",
-						&ports[0])) {
-					free_irq(DMA_OUT_INTR_VECT, &port[0]);
-					printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
-					goto out;
-				} else if (crisv32_request_dma(OUT_DMA_NBR,
-						"synchronous serial 0 dma tr",
-						DMA_VERBOSE_ON_ERROR,
-						0,
-						REQ_DMA_SYNCSER)) {
-					free_irq(DMA_OUT_INTR_VECT, &port[0]);
-					free_irq(DMA_IN_INTR_VECT, &port[0]);
-					printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
-					goto out;
-				} else if (crisv32_request_dma(IN_DMA_NBR,
-						"synchronous serial 0 dma rec",
-						DMA_VERBOSE_ON_ERROR,
-						0,
-						REQ_DMA_SYNCSER)) {
-					crisv32_free_dma(OUT_DMA_NBR);
-					free_irq(DMA_OUT_INTR_VECT, &port[0]);
-					free_irq(DMA_IN_INTR_VECT, &port[0]);
-					printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
-					goto out;
-				}
-#endif
-			}
-#ifdef CONFIG_ETRAXFS
-			else if (port == &ports[1]) {
-#ifdef SYNC_SER_DMA
-				if (request_irq(DMA6_INTR_VECT,
-						tr_interrupt,
-						0,
-						"synchronous serial 1 dma tr",
-						&ports[1])) {
-					printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
-					goto out;
-				} else if (request_irq(DMA7_INTR_VECT,
-						       rx_interrupt,
-						       0,
-						       "synchronous serial 1 dma rx",
-						       &ports[1])) {
-					free_irq(DMA6_INTR_VECT, &ports[1]);
-					printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
-					goto out;
-				} else if (crisv32_request_dma(
-						SYNC_SER1_TX_DMA_NBR,
-						"synchronous serial 1 dma tr",
-						DMA_VERBOSE_ON_ERROR,
-						0,
-						dma_sser1)) {
-					free_irq(DMA6_INTR_VECT, &ports[1]);
-					free_irq(DMA7_INTR_VECT, &ports[1]);
-					printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
-					goto out;
-				} else if (crisv32_request_dma(
-						SYNC_SER1_RX_DMA_NBR,
-						"synchronous serial 3 dma rec",
-						DMA_VERBOSE_ON_ERROR,
-						0,
-						dma_sser1)) {
-					crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
-					free_irq(DMA6_INTR_VECT, &ports[1]);
-					free_irq(DMA7_INTR_VECT, &ports[1]);
-					printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
-					goto out;
-				}
-#endif
-			}
-#endif
-                        /* Enable DMAs */
-			REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
-			REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
-			/* Enable DMA IRQs */
-			REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
-			REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
-			/* Set up wordsize = 1 for DMAs. */
-			DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
-			DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
+	/* Clear any stale data left in the flip buffer */
+	port->readp = port->writep = port->flip;
+	port->in_buffer_len = 0;
+	port->read_ts_idx = 0;
+	port->write_ts_idx = 0;
 
-			start_dma_in(port);
-			port->init_irqs = 0;
-		} else { /* !port->use_dma */
-#ifdef SYNC_SER_MANUAL
-			if (port == &ports[0]) {
-				if (request_irq(SYNCSER_INTR_VECT,
-						manual_interrupt,
-						0,
-						"synchronous serial manual irq",
-						&ports[0])) {
-					printk("Can't allocate sync serial manual irq");
-					goto out;
-				}
-			}
-#ifdef CONFIG_ETRAXFS
-			else if (port == &ports[1]) {
-				if (request_irq(SSER1_INTR_VECT,
-						manual_interrupt,
-						0,
-						"synchronous serial manual irq",
-						&ports[1])) {
-					printk(KERN_CRIT "Can't allocate sync serial manual irq");
-					goto out;
-				}
-			}
-#endif
-			port->init_irqs = 0;
-#else
-			panic("sync_serial: Manual mode not supported.\n");
-#endif /* SYNC_SER_MANUAL */
+	if (port->init_irqs != no_irq_setup) {
+		/* Init only on first call. */
+		port->busy++;
+		mutex_unlock(&sync_serial_mutex);
+		return 0;
+	}
+	if (port->use_dma) {
+#ifdef SYNC_SER_DMA
+		const char *tmp;
+		DEBUG(pr_info("Using DMA for syncser%d\n", dev));
+
+		tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
+		if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
+				tmp, port)) {
+			pr_err("Can't alloc syncser%d TX IRQ", dev);
+			ret = -EBUSY;
+			goto unlock_and_exit;
 		}
+		if (artpec_request_dma(port->dma_out_nbr, tmp,
+				DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+			free_irq(port->dma_out_intr_vect, port);
+			pr_err("Can't alloc syncser%d TX DMA", dev);
+			ret = -EBUSY;
+			goto unlock_and_exit;
+		}
+		tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
+		if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
+				tmp, port)) {
+			artpec_free_dma(port->dma_out_nbr);
+			free_irq(port->dma_out_intr_vect, port);
+			pr_err("Can't alloc syncser%d RX IRQ", dev);
+			ret = -EBUSY;
+			goto unlock_and_exit;
+		}
+		if (artpec_request_dma(port->dma_in_nbr, tmp,
+				DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+			artpec_free_dma(port->dma_out_nbr);
+			free_irq(port->dma_out_intr_vect, port);
+			free_irq(port->dma_in_intr_vect, port);
+			pr_err("Can't alloc syncser%d RX DMA", dev);
+			ret = -EBUSY;
+			goto unlock_and_exit;
+		}
+		/* Enable DMAs */
+		REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
+		REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
+		/* Enable DMA IRQs */
+		REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
+		REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
+		/* Set up wordsize = 1 for DMAs. */
+		DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
+		DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
 
-	} /* port->init_irqs */
-
+		start_dma_in(port);
+		port->init_irqs = dma_irq_setup;
+#endif
+	} else { /* !port->use_dma */
+#ifdef SYNC_SER_MANUAL
+		const char *tmp = dev == 0 ? "syncser0 manual irq" :
+					     "syncser1 manual irq";
+		if (request_irq(port->syncser_intr_vect, manual_interrupt,
+				0, tmp, port)) {
+			pr_err("Can't alloc syncser%d manual irq",
+				dev);
+			ret = -EBUSY;
+			goto unlock_and_exit;
+		}
+		port->init_irqs = manual_irq_setup;
+#else
+		panic("sync_serial: Manual mode not supported\n");
+#endif /* SYNC_SER_MANUAL */
+	}
 	port->busy++;
 	ret = 0;
-out:
+
+unlock_and_exit:
 	mutex_unlock(&sync_serial_mutex);
 	return ret;
 }
@@ -593,18 +541,17 @@
 static int sync_serial_release(struct inode *inode, struct file *file)
 {
 	int dev = iminor(inode);
-	sync_port *port;
+	struct sync_port *port;
 
-	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-	{
-		DEBUG(printk("Invalid minor %d\n", dev));
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
 		return -ENODEV;
 	}
 	port = &ports[dev];
 	if (port->busy)
 		port->busy--;
 	if (!port->busy)
-          /* XXX */ ;
+		/* XXX */;
 	return 0;
 }
 
@@ -612,21 +559,15 @@
 {
 	int dev = iminor(file_inode(file));
 	unsigned int mask = 0;
-	sync_port *port;
-	DEBUGPOLL( static unsigned int prev_mask = 0; );
+	struct sync_port *port;
+	DEBUGPOLL(
+	static unsigned int prev_mask;
+	);
 
 	port = &ports[dev];
 
-	if (!port->started) {
-		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
-		reg_sser_rw_rec_cfg rec_cfg =
-			REG_RD(sser, port->regi_sser, rw_rec_cfg);
-		cfg.en = regk_sser_yes;
-		rec_cfg.rec_en = port->input;
-		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
-		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
-		port->started = 1;
-	}
+	if (!port->started)
+		sync_serial_start_port(port);
 
 	poll_wait(file, &port->out_wait_q, wait);
 	poll_wait(file, &port->in_wait_q, wait);
@@ -645,33 +586,175 @@
 	if (port->input && sync_data_avail(port) >= port->inbufchunk)
 		mask |= POLLIN | POLLRDNORM;
 
-	DEBUGPOLL(if (mask != prev_mask)
-	      printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
-		     mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
-	      prev_mask = mask;
-	      );
+	DEBUGPOLL(
+	if (mask != prev_mask)
+		pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
+			mask,
+			mask & POLLOUT ? "POLLOUT" : "",
+			mask & POLLIN ? "POLLIN" : "");
+		prev_mask = mask;
+	);
 	return mask;
 }
 
-static int sync_serial_ioctl(struct file *file,
-		  unsigned int cmd, unsigned long arg)
+static ssize_t __sync_serial_read(struct file *file,
+				  char __user *buf,
+				  size_t count,
+				  loff_t *ppos,
+				  struct timespec *ts)
+{
+	unsigned long flags;
+	int dev = iminor(file_inode(file));
+	int avail;
+	struct sync_port *port;
+	unsigned char *start;
+	unsigned char *end;
+
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
+		return -ENODEV;
+	}
+	port = &ports[dev];
+
+	if (!port->started)
+		sync_serial_start_port(port);
+
+	/* Calculate number of available bytes */
+	/* Save pointers to avoid that they are modified by interrupt */
+	spin_lock_irqsave(&port->lock, flags);
+	start = port->readp;
+	end = port->writep;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	while ((start == end) && !port->in_buffer_len) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		wait_event_interruptible(port->in_wait_q,
+					 !(start == end && !port->full));
+
+		if (signal_pending(current))
+			return -EINTR;
+
+		spin_lock_irqsave(&port->lock, flags);
+		start = port->readp;
+		end = port->writep;
+		spin_unlock_irqrestore(&port->lock, flags);
+	}
+
+	DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
+			  dev, count,
+			  start - port->flip, end - port->flip,
+			  port->in_buffer_size));
+
+	/* Lazy read, never return wrapped data. */
+	if (end > start)
+		avail = end - start;
+	else
+		avail = port->flip + port->in_buffer_size - start;
+
+	count = count > avail ? avail : count;
+	if (copy_to_user(buf, start, count))
+		return -EFAULT;
+
+	/* If timestamp requested, find timestamp of first returned byte
+	 * and copy it.
+	 * N.B: Applications that request timestamps MUST read data in
+	 * chunks that are multiples of IN_DESCR_SIZE.
+	 * Otherwise the timestamps will not be aligned to the data read.
+	 */
+	if (ts != NULL) {
+		int idx = port->read_ts_idx;
+		memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
+		port->read_ts_idx += count / IN_DESCR_SIZE;
+		if (port->read_ts_idx >= NBR_IN_DESCR)
+			port->read_ts_idx = 0;
+	}
+
+	spin_lock_irqsave(&port->lock, flags);
+	port->readp += count;
+	/* Check for wrap */
+	if (port->readp >= port->flip + port->in_buffer_size)
+		port->readp = port->flip;
+	port->in_buffer_len -= count;
+	port->full = 0;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	DEBUGREAD(pr_info("r %d\n", count));
+
+	return count;
+}
+
+static ssize_t sync_serial_input(struct file *file, unsigned long arg)
+{
+	struct ssp_request req;
+	int count;
+	int ret;
+
+	/* Copy the request structure from user-mode. */
+	ret = copy_from_user(&req, (struct ssp_request __user *)arg,
+		sizeof(struct ssp_request));
+
+	if (ret) {
+		DEBUG(pr_info("sync_serial_input copy from user failed\n"));
+		return -EFAULT;
+	}
+
+	/* To get the timestamps aligned, make sure that 'len'
+	 * is a multiple of IN_DESCR_SIZE.
+	 */
+	if ((req.len % IN_DESCR_SIZE) != 0) {
+		DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
+			      req.len, IN_DESCR_SIZE));
+		return -EFAULT;
+	}
+
+	/* Do the actual read. */
+	/* Note that req.buf is actually a pointer to user space. */
+	count = __sync_serial_read(file, req.buf, req.len,
+				   NULL, &req.ts);
+
+	if (count < 0) {
+		DEBUG(pr_info("sync_serial_input read failed\n"));
+		return count;
+	}
+
+	/* Copy the request back to user-mode. */
+	ret = copy_to_user((struct ssp_request __user *)arg, &req,
+		sizeof(struct ssp_request));
+
+	if (ret) {
+		DEBUG(pr_info("syncser input copy2user failed\n"));
+		return -EFAULT;
+	}
+
+	/* Return the number of bytes read. */
+	return count;
+}
+
+
+static int sync_serial_ioctl_unlocked(struct file *file,
+				      unsigned int cmd, unsigned long arg)
 {
 	int return_val = 0;
 	int dma_w_size = regk_dma_set_w_size1;
 	int dev = iminor(file_inode(file));
-	sync_port *port;
+	struct sync_port *port;
 	reg_sser_rw_tr_cfg tr_cfg;
 	reg_sser_rw_rec_cfg rec_cfg;
 	reg_sser_rw_frm_cfg frm_cfg;
 	reg_sser_rw_cfg gen_cfg;
 	reg_sser_rw_intr_mask intr_mask;
 
-	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-	{
-		DEBUG(printk("Invalid minor %d\n", dev));
+	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+		DEBUG(pr_info("Invalid minor %d\n", dev));
 		return -1;
 	}
-        port = &ports[dev];
+
+	if (cmd == SSP_INPUT)
+		return sync_serial_input(file, arg);
+
+	port = &ports[dev];
 	spin_lock_irq(&port->lock);
 
 	tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
@@ -680,11 +763,9 @@
 	gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
 	intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
 
-	switch(cmd)
-	{
+	switch (cmd) {
 	case SSP_SPEED:
-		if (GET_SPEED(arg) == CODEC)
-		{
+		if (GET_SPEED(arg) == CODEC) {
 			unsigned int freq;
 
 			gen_cfg.base_freq = regk_sser_f32;
@@ -701,15 +782,25 @@
 			case FREQ_256kHz:
 				gen_cfg.clk_div = 125 *
 					(1 << (freq - FREQ_256kHz)) - 1;
-			break;
+				break;
 			case FREQ_512kHz:
 				gen_cfg.clk_div = 62;
-			break;
+				break;
 			case FREQ_1MHz:
 			case FREQ_2MHz:
 			case FREQ_4MHz:
 				gen_cfg.clk_div = 8 * (1 << freq) - 1;
-			break;
+				break;
+			}
+		} else if (GET_SPEED(arg) == CODEC_f32768) {
+			gen_cfg.base_freq = regk_sser_f32_768;
+			switch (GET_FREQ(arg)) {
+			case FREQ_4096kHz:
+				gen_cfg.clk_div = 7;
+				break;
+			default:
+				spin_unlock_irq(&port->lock);
+				return -EINVAL;
 			}
 		} else {
 			gen_cfg.base_freq = regk_sser_f29_493;
@@ -767,62 +858,64 @@
 
 		break;
 	case SSP_MODE:
-		switch(arg)
-		{
-			case MASTER_OUTPUT:
-				port->output = 1;
-				port->input = 0;
-				frm_cfg.out_on = regk_sser_tr;
-				frm_cfg.frame_pin_dir = regk_sser_out;
-				gen_cfg.clk_dir = regk_sser_out;
-				break;
-			case SLAVE_OUTPUT:
-				port->output = 1;
-				port->input = 0;
-				frm_cfg.frame_pin_dir = regk_sser_in;
-				gen_cfg.clk_dir = regk_sser_in;
-				break;
-			case MASTER_INPUT:
-				port->output = 0;
-				port->input = 1;
-				frm_cfg.frame_pin_dir = regk_sser_out;
-				frm_cfg.out_on = regk_sser_intern_tb;
-				gen_cfg.clk_dir = regk_sser_out;
-				break;
-			case SLAVE_INPUT:
-				port->output = 0;
-				port->input = 1;
-				frm_cfg.frame_pin_dir = regk_sser_in;
-				gen_cfg.clk_dir = regk_sser_in;
-				break;
-			case MASTER_BIDIR:
-				port->output = 1;
-				port->input = 1;
-				frm_cfg.frame_pin_dir = regk_sser_out;
-				frm_cfg.out_on = regk_sser_intern_tb;
-				gen_cfg.clk_dir = regk_sser_out;
-				break;
-			case SLAVE_BIDIR:
-				port->output = 1;
-				port->input = 1;
-				frm_cfg.frame_pin_dir = regk_sser_in;
-				gen_cfg.clk_dir = regk_sser_in;
-				break;
-			default:
-				spin_unlock_irq(&port->lock);
-				return -EINVAL;
+		switch (arg) {
+		case MASTER_OUTPUT:
+			port->output = 1;
+			port->input = 0;
+			frm_cfg.out_on = regk_sser_tr;
+			frm_cfg.frame_pin_dir = regk_sser_out;
+			gen_cfg.clk_dir = regk_sser_out;
+			break;
+		case SLAVE_OUTPUT:
+			port->output = 1;
+			port->input = 0;
+			frm_cfg.frame_pin_dir = regk_sser_in;
+			gen_cfg.clk_dir = regk_sser_in;
+			break;
+		case MASTER_INPUT:
+			port->output = 0;
+			port->input = 1;
+			frm_cfg.frame_pin_dir = regk_sser_out;
+			frm_cfg.out_on = regk_sser_intern_tb;
+			gen_cfg.clk_dir = regk_sser_out;
+			break;
+		case SLAVE_INPUT:
+			port->output = 0;
+			port->input = 1;
+			frm_cfg.frame_pin_dir = regk_sser_in;
+			gen_cfg.clk_dir = regk_sser_in;
+			break;
+		case MASTER_BIDIR:
+			port->output = 1;
+			port->input = 1;
+			frm_cfg.frame_pin_dir = regk_sser_out;
+			frm_cfg.out_on = regk_sser_intern_tb;
+			gen_cfg.clk_dir = regk_sser_out;
+			break;
+		case SLAVE_BIDIR:
+			port->output = 1;
+			port->input = 1;
+			frm_cfg.frame_pin_dir = regk_sser_in;
+			gen_cfg.clk_dir = regk_sser_in;
+			break;
+		default:
+			spin_unlock_irq(&port->lock);
+			return -EINVAL;
 		}
-		if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
+		if (!port->use_dma || arg == MASTER_OUTPUT ||
+				arg == SLAVE_OUTPUT)
 			intr_mask.rdav = regk_sser_yes;
 		break;
 	case SSP_FRAME_SYNC:
 		if (arg & NORMAL_SYNC) {
 			frm_cfg.rec_delay = 1;
 			frm_cfg.tr_delay = 1;
-		}
-		else if (arg & EARLY_SYNC)
+		} else if (arg & EARLY_SYNC)
 			frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
-		else if (arg & SECOND_WORD_SYNC) {
+		else if (arg & LATE_SYNC) {
+			frm_cfg.tr_delay = 2;
+			frm_cfg.rec_delay = 2;
+		} else if (arg & SECOND_WORD_SYNC) {
 			frm_cfg.rec_delay = 7;
 			frm_cfg.tr_delay = 1;
 		}
@@ -914,15 +1007,12 @@
 		frm_cfg.type = regk_sser_level;
 		frm_cfg.tr_delay = 1;
 		frm_cfg.level = regk_sser_neg_lo;
-		if (arg & SPI_SLAVE)
-		{
+		if (arg & SPI_SLAVE) {
 			rec_cfg.clk_pol = regk_sser_neg;
 			gen_cfg.clk_dir = regk_sser_in;
 			port->input = 1;
 			port->output = 0;
-		}
-		else
-		{
+		} else {
 			gen_cfg.out_clk_pol = regk_sser_pos;
 			port->input = 0;
 			port->output = 1;
@@ -965,19 +1055,19 @@
 }
 
 static long sync_serial_ioctl(struct file *file,
-                             unsigned int cmd, unsigned long arg)
+		unsigned int cmd, unsigned long arg)
 {
-       long ret;
+	long ret;
 
-       mutex_lock(&sync_serial_mutex);
-       ret = sync_serial_ioctl_unlocked(file, cmd, arg);
-       mutex_unlock(&sync_serial_mutex);
+	mutex_lock(&sync_serial_mutex);
+	ret = sync_serial_ioctl_unlocked(file, cmd, arg);
+	mutex_unlock(&sync_serial_mutex);
 
-       return ret;
+	return ret;
 }
 
 /* NOTE: sync_serial_write does not support concurrency */
-static ssize_t sync_serial_write(struct file *file, const char *buf,
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
 				 size_t count, loff_t *ppos)
 {
 	int dev = iminor(file_inode(file));
@@ -993,7 +1083,7 @@
 	unsigned char *buf_stop_ptr; /* Last byte + 1 */
 
 	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
-		DEBUG(printk("Invalid minor %d\n", dev));
+		DEBUG(pr_info("Invalid minor %d\n", dev));
 		return -ENODEV;
 	}
 	port = &ports[dev];
@@ -1006,9 +1096,9 @@
 	 * |_________|___________________|________________________|
 	 *           ^ rd_ptr            ^ wr_ptr
 	 */
-	DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
-			  port->port_nbr, count, port->active_tr_descr,
-			  port->catch_tr_descr));
+	DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
+			   port->port_nbr, count, port->active_tr_descr,
+			   port->catch_tr_descr));
 
 	/* Read variables that may be updated by interrupts */
 	spin_lock_irqsave(&port->lock, flags);
@@ -1020,7 +1110,7 @@
 	if (port->tr_running &&
 	    ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
 	     out_buf_count >= OUT_BUFFER_SIZE)) {
-		DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
+		DEBUGWRITE(pr_info("sser%d full\n", dev));
 		return -EAGAIN;
 	}
 
@@ -1043,15 +1133,16 @@
 	if (copy_from_user(wr_ptr, buf, trunc_count))
 		return -EFAULT;
 
-	DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d     %p %p %p\n",
-			   out_buf_count, trunc_count,
-			   port->out_buf_count, port->out_buffer,
-			   wr_ptr, buf_stop_ptr));
+	DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d     %p %p %p\n",
+			    out_buf_count, trunc_count,
+			    port->out_buf_count, port->out_buffer,
+			    wr_ptr, buf_stop_ptr));
 
 	/* Make sure transmitter/receiver is running */
 	if (!port->started) {
 		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
-		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
+		reg_sser_rw_rec_cfg rec_cfg =
+			REG_RD(sser, port->regi_sser, rw_rec_cfg);
 		cfg.en = regk_sser_yes;
 		rec_cfg.rec_en = port->input;
 		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
@@ -1068,8 +1159,11 @@
 	spin_lock_irqsave(&port->lock, flags);
 	port->out_buf_count += trunc_count;
 	if (port->use_dma) {
+#ifdef SYNC_SER_DMA
 		start_dma_out(port, wr_ptr, trunc_count);
+#endif
 	} else if (!port->tr_running) {
+#ifdef SYNC_SER_MANUAL
 		reg_sser_rw_intr_mask intr_mask;
 		intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
 		/* Start sender by writing data */
@@ -1077,14 +1171,15 @@
 		/* and enable transmitter ready IRQ */
 		intr_mask.trdy = 1;
 		REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
+#endif
 	}
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	/* Exit if non blocking */
 	if (file->f_flags & O_NONBLOCK) {
-		DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu  %08x\n",
-				  port->port_nbr, trunc_count,
-				  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
+		DEBUGWRITE(pr_info("w d%d c %u  %08x\n",
+				   port->port_nbr, trunc_count,
+				   REG_RD_INT(dma, port->regi_dmaout, r_intr)));
 		return trunc_count;
 	}
 
@@ -1094,105 +1189,32 @@
 	if (signal_pending(current))
 		return -EINTR;
 
-	DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
-			  port->port_nbr, trunc_count));
+	DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
 	return trunc_count;
 }
 
-static ssize_t sync_serial_read(struct file * file, char * buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	int dev = iminor(file_inode(file));
-	int avail;
-	sync_port *port;
-	unsigned char* start;
-	unsigned char* end;
-	unsigned long flags;
-
-	if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-	{
-		DEBUG(printk("Invalid minor %d\n", dev));
-		return -ENODEV;
-	}
-	port = &ports[dev];
-
-	DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
-
-	if (!port->started)
-	{
-		reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
-		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
-		reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
-		cfg.en = regk_sser_yes;
-		tr_cfg.tr_en = regk_sser_yes;
-		rec_cfg.rec_en = regk_sser_yes;
-		REG_WR(sser, port->regi_sser, rw_cfg, cfg);
-		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
-		REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
-		port->started = 1;
-	}
-
-	/* Calculate number of available bytes */
-	/* Save pointers to avoid that they are modified by interrupt */
-	spin_lock_irqsave(&port->lock, flags);
-	start = (unsigned char*)port->readp; /* cast away volatile */
-	end = (unsigned char*)port->writep;  /* cast away volatile */
-	spin_unlock_irqrestore(&port->lock, flags);
-	while ((start == end) && !port->full) /* No data */
-	{
-		DEBUGREAD(printk(KERN_DEBUG "&"));
-		if (file->f_flags & O_NONBLOCK)
-			return -EAGAIN;
-
-		wait_event_interruptible(port->in_wait_q,
-					 !(start == end && !port->full));
-		if (signal_pending(current))
-			return -EINTR;
-
-		spin_lock_irqsave(&port->lock, flags);
-		start = (unsigned char*)port->readp; /* cast away volatile */
-		end = (unsigned char*)port->writep;  /* cast away volatile */
-		spin_unlock_irqrestore(&port->lock, flags);
-	}
-
-	/* Lazy read, never return wrapped data. */
-	if (port->full)
-		avail = port->in_buffer_size;
-	else if (end > start)
-		avail = end - start;
-	else
-		avail = port->flip + port->in_buffer_size - start;
-
-	count = count > avail ? avail : count;
-	if (copy_to_user(buf, start, count))
-		return -EFAULT;
-	/* Disable interrupts while updating readp */
-	spin_lock_irqsave(&port->lock, flags);
-	port->readp += count;
-	if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
-		port->readp = port->flip;
-	port->full = 0;
-	spin_unlock_irqrestore(&port->lock, flags);
-	DEBUGREAD(printk("r %d\n", count));
-	return count;
+	return __sync_serial_read(file, buf, count, ppos, NULL);
 }
 
-static void send_word(sync_port* port)
+#ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port)
 {
 	reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
 	reg_sser_rw_tr_data tr_data =  {0};
 
-	switch(tr_cfg.sample_size)
+	switch (tr_cfg.sample_size) {
+	case 8:
+		port->out_buf_count--;
+		tr_data.data = *port->out_rd_ptr++;
+		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
+		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
+			port->out_rd_ptr = port->out_buffer;
+		break;
+	case 12:
 	{
-	 case 8:
-		 port->out_buf_count--;
-		 tr_data.data = *port->out_rd_ptr++;
-		 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
-		 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
-			 port->out_rd_ptr = port->out_buffer;
-		 break;
-	 case 12:
-	 {
 		int data = (*port->out_rd_ptr++) << 8;
 		data |= *port->out_rd_ptr++;
 		port->out_buf_count -= 2;
@@ -1200,8 +1222,8 @@
 		REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
 		if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
 			port->out_rd_ptr = port->out_buffer;
+		break;
 	}
-	break;
 	case 16:
 		port->out_buf_count -= 2;
 		tr_data.data = *(unsigned short *)port->out_rd_ptr;
@@ -1233,27 +1255,28 @@
 		break;
 	}
 }
+#endif
 
-static void start_dma_out(struct sync_port *port,
-			  const char *data, int count)
+#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count)
 {
-	port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
+	port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
 	port->active_tr_descr->after = port->active_tr_descr->buf + count;
 	port->active_tr_descr->intr = 1;
 
 	port->active_tr_descr->eol = 1;
 	port->prev_tr_descr->eol = 0;
 
-	DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
+	DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
 		port->prev_tr_descr, port->active_tr_descr));
 	port->prev_tr_descr = port->active_tr_descr;
-	port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
+	port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
 
 	if (!port->tr_running) {
 		reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
 			rw_tr_cfg);
 
-		port->out_context.next = 0;
+		port->out_context.next = NULL;
 		port->out_context.saved_data =
 			(dma_descr_data *)virt_to_phys(port->prev_tr_descr);
 		port->out_context.saved_data_buf = port->prev_tr_descr->buf;
@@ -1263,57 +1286,58 @@
 
 		tr_cfg.tr_en = regk_sser_yes;
 		REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
-		DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
+		DEBUGTRDMA(pr_info(KERN_INFO "dma s\n"););
 	} else {
 		DMA_CONTINUE_DATA(port->regi_dmaout);
-		DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
+		DEBUGTRDMA(pr_info("dma c\n"););
 	}
 
 	port->tr_running = 1;
 }
 
-static void start_dma_in(sync_port *port)
+static void start_dma_in(struct sync_port *port)
 {
 	int i;
 	char *buf;
+	unsigned long flags;
+	spin_lock_irqsave(&port->lock, flags);
 	port->writep = port->flip;
+	spin_unlock_irqrestore(&port->lock, flags);
 
-	if (port->writep > port->flip + port->in_buffer_size) {
-		panic("Offset too large in sync serial driver\n");
-		return;
-	}
-	buf = (char*)virt_to_phys(port->in_buffer);
+	buf = (char *)virt_to_phys(port->in_buffer);
 	for (i = 0; i < NBR_IN_DESCR; i++) {
 		port->in_descr[i].buf = buf;
 		port->in_descr[i].after = buf + port->inbufchunk;
 		port->in_descr[i].intr = 1;
-		port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
+		port->in_descr[i].next =
+			(dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
 		port->in_descr[i].buf = buf;
 		buf += port->inbufchunk;
 	}
 	/* Link the last descriptor to the first */
-	port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+	port->in_descr[i-1].next =
+		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
 	port->in_descr[i-1].eol = regk_sser_yes;
 	port->next_rx_desc = &port->in_descr[0];
 	port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
-	port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+	port->in_context.saved_data =
+		(dma_descr_data *)virt_to_phys(&port->in_descr[0]);
 	port->in_context.saved_data_buf = port->in_descr[0].buf;
 	DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
 }
 
-#ifdef SYNC_SER_DMA
 static irqreturn_t tr_interrupt(int irq, void *dev_id)
 {
 	reg_dma_r_masked_intr masked;
-	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
+	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
 	reg_dma_rw_stat stat;
 	int i;
 	int found = 0;
 	int stop_sser = 0;
 
 	for (i = 0; i < NBR_PORTS; i++) {
-		sync_port *port = &ports[i];
-		if (!port->enabled  || !port->use_dma)
+		struct sync_port *port = &ports[i];
+		if (!port->enabled || !port->use_dma)
 			continue;
 
 		/* IRQ active for the port? */
@@ -1338,19 +1362,20 @@
 			int sent;
 			sent = port->catch_tr_descr->after -
 				port->catch_tr_descr->buf;
-			DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
-					  "in descr %p (ac: %p)\n",
-					  port->out_buf_count, sent,
-					  port->out_buf_count - sent,
-					  port->catch_tr_descr,
-					  port->active_tr_descr););
+			DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
+					   "in descr %p (ac: %p)\n",
+					   port->out_buf_count, sent,
+					   port->out_buf_count - sent,
+					   port->catch_tr_descr,
+					   port->active_tr_descr););
 			port->out_buf_count -= sent;
 			port->catch_tr_descr =
 				phys_to_virt((int) port->catch_tr_descr->next);
 			port->out_rd_ptr =
 				phys_to_virt((int) port->catch_tr_descr->buf);
 		} else {
-			int i, sent;
+			reg_sser_rw_tr_cfg tr_cfg;
+			int j, sent;
 			/* EOL handler.
 			 * Note that if an EOL was encountered during the irq
 			 * locked section of sync_ser_write the DMA will be
@@ -1358,11 +1383,11 @@
 			 * The remaining descriptors will be traversed by
 			 * the descriptor interrupts as usual.
 			 */
-			i = 0;
+			j = 0;
 			while (!port->catch_tr_descr->eol) {
 				sent = port->catch_tr_descr->after -
 					port->catch_tr_descr->buf;
-				DEBUGOUTBUF(printk(KERN_DEBUG
+				DEBUGOUTBUF(pr_info(
 					"traversing descr %p -%d (%d)\n",
 					port->catch_tr_descr,
 					sent,
@@ -1370,16 +1395,15 @@
 				port->out_buf_count -= sent;
 				port->catch_tr_descr = phys_to_virt(
 					(int)port->catch_tr_descr->next);
-				i++;
-				if (i >= NBR_OUT_DESCR) {
+				j++;
+				if (j >= NBR_OUT_DESCR) {
 					/* TODO: Reset and recover */
 					panic("sync_serial: missing eol");
 				}
 			}
 			sent = port->catch_tr_descr->after -
 				port->catch_tr_descr->buf;
-			DEBUGOUTBUF(printk(KERN_DEBUG
-				"eol at descr %p -%d (%d)\n",
+			DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
 				port->catch_tr_descr,
 				sent,
 				port->out_buf_count));
@@ -1394,15 +1418,13 @@
 					OUT_BUFFER_SIZE)
 				port->out_rd_ptr = port->out_buffer;
 
-			reg_sser_rw_tr_cfg tr_cfg =
-				REG_RD(sser, port->regi_sser, rw_tr_cfg);
-			DEBUGTXINT(printk(KERN_DEBUG
+			tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
+			DEBUGTXINT(pr_info(
 				"tr_int DMA stop %d, set catch @ %p\n",
 				port->out_buf_count,
 				port->active_tr_descr));
 			if (port->out_buf_count != 0)
-				printk(KERN_CRIT "sync_ser: buffer not "
-					"empty after eol.\n");
+				pr_err("sync_ser: buf not empty after eol\n");
 			port->catch_tr_descr = port->active_tr_descr;
 			port->tr_running = 0;
 			tr_cfg.tr_en = regk_sser_no;
@@ -1414,62 +1436,79 @@
 	return IRQ_RETVAL(found);
 } /* tr_interrupt */
 
+
+static inline void handle_rx_packet(struct sync_port *port)
+{
+	int idx;
+	reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
+	unsigned long flags;
+
+	DEBUGRXINT(pr_info("!"));
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* If we overrun the user experience is crap regardless if we
+	 * drop new or old data. Its much easier to get it right when
+	 * dropping new data so lets do that.
+	 */
+	if ((port->writep + port->inbufchunk <=
+	     port->flip + port->in_buffer_size) &&
+	    (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
+		memcpy(port->writep,
+		       phys_to_virt((unsigned)port->next_rx_desc->buf),
+		       port->inbufchunk);
+		port->writep += port->inbufchunk;
+		if (port->writep >= port->flip + port->in_buffer_size)
+			port->writep = port->flip;
+
+		/* Timestamp the new data chunk. */
+		if (port->write_ts_idx == NBR_IN_DESCR)
+			port->write_ts_idx = 0;
+		idx = port->write_ts_idx++;
+		do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+		port->in_buffer_len += port->inbufchunk;
+	}
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	port->next_rx_desc->eol = 1;
+	port->prev_rx_desc->eol = 0;
+	/* Cache bug workaround */
+	flush_dma_descr(port->prev_rx_desc, 0);
+	port->prev_rx_desc = port->next_rx_desc;
+	port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
+	/* Cache bug workaround */
+	flush_dma_descr(port->prev_rx_desc, 1);
+	/* wake up the waiting process */
+	wake_up_interruptible(&port->in_wait_q);
+	DMA_CONTINUE(port->regi_dmain);
+	REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+
+}
+
 static irqreturn_t rx_interrupt(int irq, void *dev_id)
 {
 	reg_dma_r_masked_intr masked;
-	reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
 
 	int i;
 	int found = 0;
 
-	for (i = 0; i < NBR_PORTS; i++)
-	{
-		sync_port *port = &ports[i];
+	DEBUG(pr_info("rx_interrupt\n"));
 
-		if (!port->enabled || !port->use_dma )
+	for (i = 0; i < NBR_PORTS; i++) {
+		struct sync_port *port = &ports[i];
+
+		if (!port->enabled || !port->use_dma)
 			continue;
 
 		masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
 
-		if (masked.data) /* Descriptor interrupt */
-		{
-			found = 1;
-			while (REG_RD(dma, port->regi_dmain, rw_data) !=
-			       virt_to_phys(port->next_rx_desc)) {
-				DEBUGRXINT(printk(KERN_DEBUG "!"));
-				if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
-					int first_size = port->flip + port->in_buffer_size - port->writep;
-					memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
-					memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
-					port->writep = port->flip + port->inbufchunk - first_size;
-				} else {
-					memcpy((char*)port->writep,
-					       phys_to_virt((unsigned)port->next_rx_desc->buf),
-					       port->inbufchunk);
-					port->writep += port->inbufchunk;
-					if (port->writep >= port->flip + port->in_buffer_size)
-						port->writep = port->flip;
-				}
-                                if (port->writep == port->readp)
-                                {
-				  port->full = 1;
-                                }
+		if (!masked.data)
+			continue;
 
-				port->next_rx_desc->eol = 1;
-				port->prev_rx_desc->eol = 0;
-				/* Cache bug workaround */
-				flush_dma_descr(port->prev_rx_desc, 0);
-				port->prev_rx_desc = port->next_rx_desc;
-				port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
-				/* Cache bug workaround */
-				flush_dma_descr(port->prev_rx_desc, 1);
-				/* wake up the waiting process */
-				wake_up_interruptible(&port->in_wait_q);
-				DMA_CONTINUE(port->regi_dmain);
-				REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
-
-			}
-		}
+		/* Descriptor interrupt */
+		found = 1;
+		while (REG_RD(dma, port->regi_dmain, rw_data) !=
+				virt_to_phys(port->next_rx_desc))
+			handle_rx_packet(port);
 	}
 	return IRQ_RETVAL(found);
 } /* rx_interrupt */
@@ -1478,75 +1517,83 @@
 #ifdef SYNC_SER_MANUAL
 static irqreturn_t manual_interrupt(int irq, void *dev_id)
 {
+	unsigned long flags;
 	int i;
 	int found = 0;
 	reg_sser_r_masked_intr masked;
 
-	for (i = 0; i < NBR_PORTS; i++)
-	{
-		sync_port *port = &ports[i];
+	for (i = 0; i < NBR_PORTS; i++) {
+		struct sync_port *port = &ports[i];
 
 		if (!port->enabled || port->use_dma)
-		{
 			continue;
-		}
 
 		masked = REG_RD(sser, port->regi_sser, r_masked_intr);
-		if (masked.rdav)	/* Data received? */
-		{
-			reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
-			reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
+		/* Data received? */
+		if (masked.rdav) {
+			reg_sser_rw_rec_cfg rec_cfg =
+				REG_RD(sser, port->regi_sser, rw_rec_cfg);
+			reg_sser_r_rec_data data = REG_RD(sser,
+				port->regi_sser, r_rec_data);
 			found = 1;
 			/* Read data */
-			switch(rec_cfg.sample_size)
-			{
+			spin_lock_irqsave(&port->lock, flags);
+			switch (rec_cfg.sample_size) {
 			case 8:
 				*port->writep++ = data.data & 0xff;
 				break;
 			case 12:
 				*port->writep = (data.data & 0x0ff0) >> 4;
 				*(port->writep + 1) = data.data & 0x0f;
-				port->writep+=2;
+				port->writep += 2;
 				break;
 			case 16:
-				*(unsigned short*)port->writep = data.data;
-				port->writep+=2;
+				*(unsigned short *)port->writep = data.data;
+				port->writep += 2;
 				break;
 			case 24:
-				*(unsigned int*)port->writep = data.data;
-				port->writep+=3;
+				*(unsigned int *)port->writep = data.data;
+				port->writep += 3;
 				break;
 			case 32:
-				*(unsigned int*)port->writep = data.data;
-				port->writep+=4;
+				*(unsigned int *)port->writep = data.data;
+				port->writep += 4;
 				break;
 			}
 
-			if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
+			/* Wrap? */
+			if (port->writep >= port->flip + port->in_buffer_size)
 				port->writep = port->flip;
 			if (port->writep == port->readp) {
-				/* receive buffer overrun, discard oldest data
-				 */
+				/* Receive buf overrun, discard oldest data */
 				port->readp++;
-				if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
+				/* Wrap? */
+				if (port->readp >= port->flip +
+						port->in_buffer_size)
 					port->readp = port->flip;
 			}
+			spin_unlock_irqrestore(&port->lock, flags);
 			if (sync_data_avail(port) >= port->inbufchunk)
-				wake_up_interruptible(&port->in_wait_q); /* Wake up application */
+				/* Wake up application */
+				wake_up_interruptible(&port->in_wait_q);
 		}
 
-		if (masked.trdy) /* Transmitter ready? */
-		{
+		/* Transmitter ready? */
+		if (masked.trdy) {
 			found = 1;
-			if (port->out_buf_count > 0) /* More data to send */
+			/* More data to send */
+			if (port->out_buf_count > 0)
 				send_word(port);
-			else /* transmission finished */
-			{
+			else {
+				/* Transmission finished */
 				reg_sser_rw_intr_mask intr_mask;
-				intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
+				intr_mask = REG_RD(sser, port->regi_sser,
+					rw_intr_mask);
 				intr_mask.trdy = 0;
-				REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
-				wake_up_interruptible(&port->out_wait_q); /* Wake up application */
+				REG_WR(sser, port->regi_sser,
+					rw_intr_mask, intr_mask);
+				/* Wake up application */
+				wake_up_interruptible(&port->out_wait_q);
 			}
 		}
 	}
@@ -1554,4 +1601,109 @@
 }
 #endif
 
+static int __init etrax_sync_serial_init(void)
+{
+#if 1
+	/* This code will be removed when we move to udev for all devices. */
+	syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
+	if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
+		pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
+		return -1;
+	}
+#else
+	/* Allocate dynamic major number. */
+	if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
+		pr_err("Failed to allocate character device region\n");
+		return -1;
+	}
+#endif
+	syncser_cdev = cdev_alloc();
+	if (!syncser_cdev) {
+		pr_err("Failed to allocate cdev for syncser\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		return -1;
+	}
+	cdev_init(syncser_cdev, &syncser_fops);
+
+	/* Create a sysfs class for syncser */
+	syncser_class = class_create(THIS_MODULE, "syncser_class");
+
+	/* Initialize Ports */
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
+	if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
+		pr_warn("Unable to alloc pins for synchronous serial port 0\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		return -EIO;
+	}
+	initialize_port(0);
+	ports[0].enabled = 1;
+	/* Register with sysfs so udev can pick it up. */
+	device_create(syncser_class, NULL, syncser_first, NULL,
+		      "%s%d", SYNCSER_NAME, 0);
+#endif
+
+#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
+	if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
+		pr_warn("Unable to alloc pins for synchronous serial port 1\n");
+		unregister_chrdev_region(syncser_first, minor_count);
+		class_destroy(syncser_class);
+		return -EIO;
+	}
+	initialize_port(1);
+	ports[1].enabled = 1;
+	/* Register with sysfs so udev can pick it up. */
+	device_create(syncser_class, NULL, syncser_first, NULL,
+		      "%s%d", SYNCSER_NAME, 1);
+#endif
+
+	/* Add it to system */
+	if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
+		pr_err("Failed to add syncser as char device\n");
+		device_destroy(syncser_class, syncser_first);
+		class_destroy(syncser_class);
+		cdev_del(syncser_cdev);
+		unregister_chrdev_region(syncser_first, minor_count);
+		return -1;
+	}
+
+
+	pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
+		SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
+
+	return 0;
+}
+
+static void __exit etrax_sync_serial_exit(void)
+{
+	int i;
+	device_destroy(syncser_class, syncser_first);
+	class_destroy(syncser_class);
+
+	if (syncser_cdev) {
+		cdev_del(syncser_cdev);
+		unregister_chrdev_region(syncser_first, minor_count);
+	}
+	for (i = 0; i < NBR_PORTS; i++) {
+		struct sync_port *port = &ports[i];
+		if (port->init_irqs == dma_irq_setup) {
+			/* Free dma irqs and dma channels. */
+#ifdef SYNC_SER_DMA
+			artpec_free_dma(port->dma_in_nbr);
+			artpec_free_dma(port->dma_out_nbr);
+			free_irq(port->dma_out_intr_vect, port);
+			free_irq(port->dma_in_intr_vect, port);
+#endif
+		} else if (port->init_irqs == manual_irq_setup) {
+			/* Free manual irq. */
+			free_irq(port->syncser_intr_vect, port);
+		}
+	}
+
+	pr_info("ARTPEC synchronous serial port unregistered\n");
+}
+
 module_init(etrax_sync_serial_init);
+module_exit(etrax_sync_serial_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c
index 610909b..02e33eb 100644
--- a/arch/cris/arch-v32/kernel/debugport.c
+++ b/arch/cris/arch-v32/kernel/debugport.c
@@ -3,7 +3,9 @@
  */
 
 #include <linux/console.h>
+#include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/string.h>
 #include <hwregs/reg_rdwr.h>
 #include <hwregs/reg_map.h>
 #include <hwregs/ser_defs.h>
@@ -65,6 +67,7 @@
   },
 #endif
 };
+
 static struct dbg_port *port =
 #if defined(CONFIG_ETRAX_DEBUG_PORT0)
 	&ports[0];
@@ -97,14 +100,19 @@
 #endif
 #endif
 
-static void
-start_port(struct dbg_port* p)
+static void start_port(struct dbg_port *p)
 {
-	if (!p)
+	/* Set up serial port registers */
+	reg_ser_rw_tr_ctrl tr_ctrl = {0};
+	reg_ser_rw_tr_dma_en tr_dma_en = {0};
+
+	reg_ser_rw_rec_ctrl rec_ctrl = {0};
+	reg_ser_rw_tr_baud_div tr_baud_div = {0};
+	reg_ser_rw_rec_baud_div rec_baud_div = {0};
+
+	if (!p || p->started)
 		return;
 
-	if (p->started)
-		return;
 	p->started = 1;
 
 	if (p->nbr == 1)
@@ -118,36 +126,24 @@
 		crisv32_pinmux_alloc_fixed(pinmux_ser4);
 #endif
 
-	/* Set up serial port registers */
-	reg_ser_rw_tr_ctrl tr_ctrl = {0};
-	reg_ser_rw_tr_dma_en tr_dma_en = {0};
-
-	reg_ser_rw_rec_ctrl rec_ctrl = {0};
-	reg_ser_rw_tr_baud_div tr_baud_div = {0};
-	reg_ser_rw_rec_baud_div rec_baud_div = {0};
-
 	tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
 	tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
 	tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
 	tr_ctrl.en = rec_ctrl.en = 1;
 
-	if (p->parity == 'O')
-	{
+	if (p->parity == 'O') {
 		tr_ctrl.par_en = regk_ser_yes;
 		tr_ctrl.par = regk_ser_odd;
 		rec_ctrl.par_en = regk_ser_yes;
 		rec_ctrl.par = regk_ser_odd;
-	}
-	else if (p->parity == 'E')
-	{
+	} else if (p->parity == 'E') {
 		tr_ctrl.par_en = regk_ser_yes;
 		tr_ctrl.par = regk_ser_even;
 		rec_ctrl.par_en = regk_ser_yes;
 		rec_ctrl.par = regk_ser_odd;
 	}
 
-	if (p->bits == 7)
-	{
+	if (p->bits == 7) {
 		tr_ctrl.data_bits = regk_ser_bits7;
 		rec_ctrl.data_bits = regk_ser_bits7;
 	}
@@ -161,8 +157,7 @@
 
 #ifdef CONFIG_ETRAX_KGDB
 /* Use polling to get a single character from the kernel debug port */
-int
-getDebugChar(void)
+int getDebugChar(void)
 {
 	reg_ser_rs_stat_din stat;
 	reg_ser_rw_ack_intr ack_intr = { 0 };
@@ -179,8 +174,7 @@
 }
 
 /* Use polling to put a single character to the kernel debug port */
-void
-putDebugChar(int val)
+void putDebugChar(int val)
 {
 	reg_ser_r_stat_din stat;
 	do {
@@ -190,12 +184,48 @@
 }
 #endif /* CONFIG_ETRAX_KGDB */
 
+static void __init early_putch(int c)
+{
+	reg_ser_r_stat_din stat;
+	/* Wait until transmitter is ready and send. */
+	do
+		stat = REG_RD(ser, port->instance, r_stat_din);
+	while (!stat.tr_rdy);
+	REG_WR_INT(ser, port->instance, rw_dout, c);
+}
+
+static void __init
+early_console_write(struct console *con, const char *s, unsigned n)
+{
+	extern void reset_watchdog(void);
+	int i;
+
+	/* Send data. */
+	for (i = 0; i < n; i++) {
+		/* TODO: the '\n' -> '\n\r' translation should be done at the
+		   receiver. Remove it when the serial driver removes it.   */
+		if (s[i] == '\n')
+			early_putch('\r');
+		early_putch(s[i]);
+		reset_watchdog();
+	}
+}
+
+static struct console early_console_dev __initdata = {
+	.name   = "early",
+	.write  = early_console_write,
+	.flags  = CON_PRINTBUFFER | CON_BOOT,
+	.index  = -1
+};
+
 /* Register console for printk's, etc. */
-int __init
-init_etrax_debug(void)
+int __init init_etrax_debug(void)
 {
         start_port(port);
 
+	/* Register an early console if a debug port was chosen.  */
+	register_console(&early_console_dev);
+
 #ifdef CONFIG_ETRAX_KGDB
 	start_port(kgdb_port);
 #endif /* CONFIG_ETRAX_KGDB */
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index ee66866..eb74dab 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/cpufreq.h>
+#include <linux/mm.h>
 #include <asm/types.h>
 #include <asm/signal.h>
 #include <asm/io.h>
@@ -56,7 +57,6 @@
 }
 arch_initcall(etrax_init_cont_rotime);
 
-
 unsigned long timer_regs[NR_CPUS] =
 {
 	regi_timer0,
@@ -68,9 +68,8 @@
 extern int set_rtc_mmss(unsigned long nowtime);
 
 #ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
-			void *data);
+static int cris_time_freq_notifier(struct notifier_block *nb,
+				   unsigned long val, void *data);
 
 static struct notifier_block cris_time_freq_notifier_block = {
 	.notifier_call = cris_time_freq_notifier,
@@ -87,7 +86,6 @@
 	return ns;
 }
 
-
 /* From timer MDS describing the hardware watchdog:
  * 4.3.1 Watchdog Operation
  * The watchdog timer is an 8-bit timer with a configurable start value.
@@ -109,11 +107,18 @@
  * is used though, so set this really low. */
 #define WATCHDOG_MIN_FREE_PAGES 8
 
+/* for reliable NICE_DOGGY behaviour */
+static int bite_in_progress;
+
 void reset_watchdog(void)
 {
 #if defined(CONFIG_ETRAX_WATCHDOG)
 	reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
 
+#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
+	if (unlikely(bite_in_progress))
+		return;
+#endif
 	/* Only keep watchdog happy as long as we have memory left! */
 	if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
 		/* Reset the watchdog with the inverse of the old key */
@@ -148,7 +153,9 @@
 #if defined(CONFIG_ETRAX_WATCHDOG)
 	extern int cause_of_death;
 
+	nmi_enter();
 	oops_in_progress = 1;
+	bite_in_progress = 1;
 	printk(KERN_WARNING "Watchdog bite\n");
 
 	/* Check if forced restart or unexpected watchdog */
@@ -170,6 +177,7 @@
 	printk(KERN_WARNING "Oops: bitten by watchdog\n");
 	show_registers(regs);
 	oops_in_progress = 0;
+	printk("\n"); /* Flush mtdoops.  */
 #ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
 	reset_watchdog();
 #endif
@@ -202,7 +210,7 @@
 	/* Reset watchdog otherwise it resets us! */
 	reset_watchdog();
 
-        /* Update statistics. */
+	/* Update statistics. */
 	update_process_times(user_mode(regs));
 
 	cris_do_profile(regs); /* Save profiling information */
@@ -213,7 +221,7 @@
 
 	/* Call the real timer interrupt handler */
 	xtime_update(1);
-        return IRQ_HANDLED;
+	return IRQ_HANDLED;
 }
 
 /* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
@@ -293,14 +301,13 @@
 
 #ifdef CONFIG_CPU_FREQ
 	cpufreq_register_notifier(&cris_time_freq_notifier_block,
-		CPUFREQ_TRANSITION_NOTIFIER);
+				  CPUFREQ_TRANSITION_NOTIFIER);
 #endif
 }
 
 #ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
-			void *data)
+static int cris_time_freq_notifier(struct notifier_block *nb,
+				   unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freqs = data;
 	if (val == CPUFREQ_POSTCHANGE) {
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index 0b5b70d..f0f335d 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -26,8 +26,7 @@
 /* Copy to userspace.  This is based on the memcpy used for
    kernel-to-kernel copying; see "string.c".  */
 
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -155,13 +154,13 @@
 
   return retn;
 }
+EXPORT_SYMBOL(__copy_user);
 
 /* Copy from user to kernel, zeroing the bytes that were inaccessible in
    userland.  The return-value is the number of bytes that were
    inaccessible.  */
-
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+				  unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -321,11 +320,10 @@
 
   return retn + n;
 }
+EXPORT_SYMBOL(__copy_user_zeroing);
 
 /* Zero userspace.  */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -468,3 +466,4 @@
 
   return retn;
 }
+EXPORT_SYMBOL(__do_clear_user);
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index 38f29ee..05a0470 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -26,44 +26,15 @@
 
 static void crisv32_pinmux_set(int port);
 
-int crisv32_pinmux_init(void)
-{
-	static int initialized;
-
-	if (!initialized) {
-		reg_pinmux_rw_pa pa = REG_RD(pinmux, regi_pinmux, rw_pa);
-		initialized = 1;
-		REG_WR_INT(pinmux, regi_pinmux, rw_hwprot, 0);
-		pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
-		    pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
-		REG_WR(pinmux, regi_pinmux, rw_pa, pa);
-		crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
-		crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
-		crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
-		crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
-	}
-
-	return 0;
-}
-
-int
-crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
+static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+				 enum pin_mode mode)
 {
 	int i;
-	unsigned long flags;
-
-	crisv32_pinmux_init();
-
-	if (port > PORTS || port < 0)
-		return -EINVAL;
-
-	spin_lock_irqsave(&pinmux_lock, flags);
 
 	for (i = first_pin; i <= last_pin; i++) {
 		if ((pins[port][i] != pinmux_none)
 		    && (pins[port][i] != pinmux_gpio)
 		    && (pins[port][i] != mode)) {
-			spin_unlock_irqrestore(&pinmux_lock, flags);
 #ifdef DEBUG
 			panic("Pinmux alloc failed!\n");
 #endif
@@ -75,10 +46,46 @@
 		pins[port][i] = mode;
 
 	crisv32_pinmux_set(port);
+}
+
+static int crisv32_pinmux_init(void)
+{
+	static int initialized;
+
+	if (!initialized) {
+		reg_pinmux_rw_pa pa = REG_RD(pinmux, regi_pinmux, rw_pa);
+		initialized = 1;
+		REG_WR_INT(pinmux, regi_pinmux, rw_hwprot, 0);
+		pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
+		    pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
+		REG_WR(pinmux, regi_pinmux, rw_pa, pa);
+		__crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
+		__crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
+		__crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
+		__crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
+	}
+
+	return 0;
+}
+
+int crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+			 enum pin_mode mode)
+{
+	unsigned long flags;
+	int ret;
+
+	crisv32_pinmux_init();
+
+	if (port > PORTS || port < 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&pinmux_lock, flags);
+
+	ret = __crisv32_pinmux_alloc(port, first_pin, last_pin, mode);
 
 	spin_unlock_irqrestore(&pinmux_lock, flags);
 
-	return 0;
+	return ret;
 }
 
 int crisv32_pinmux_alloc_fixed(enum fixed_function function)
@@ -98,58 +105,58 @@
 
 	switch (function) {
 	case pinmux_ser1:
-		ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
 		hwprot.ser1 = regk_pinmux_yes;
 		break;
 	case pinmux_ser2:
-		ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
 		hwprot.ser2 = regk_pinmux_yes;
 		break;
 	case pinmux_ser3:
-		ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
 		hwprot.ser3 = regk_pinmux_yes;
 		break;
 	case pinmux_sser0:
-		ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
-		ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
+		ret |= __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
 		hwprot.sser0 = regk_pinmux_yes;
 		break;
 	case pinmux_sser1:
-		ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
 		hwprot.sser1 = regk_pinmux_yes;
 		break;
 	case pinmux_ata0:
-		ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
-		ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
+		ret |= __crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
 		hwprot.ata0 = regk_pinmux_yes;
 		break;
 	case pinmux_ata1:
-		ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
-		ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+		ret |= __crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
 		hwprot.ata1 = regk_pinmux_yes;
 		break;
 	case pinmux_ata2:
-		ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
-		ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
+		ret |= __crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
 		hwprot.ata2 = regk_pinmux_yes;
 		break;
 	case pinmux_ata3:
-		ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
-		ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
+		ret |= __crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
 		hwprot.ata2 = regk_pinmux_yes;
 		break;
 	case pinmux_ata:
-		ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
-		ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
+		ret |= __crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
 		hwprot.ata = regk_pinmux_yes;
 		break;
 	case pinmux_eth1:
-		ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
 		hwprot.eth1 = regk_pinmux_yes;
 		hwprot.eth1_mgm = regk_pinmux_yes;
 		break;
 	case pinmux_timer:
-		ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+		ret = __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
 		hwprot.timer = regk_pinmux_yes;
 		spin_unlock_irqrestore(&pinmux_lock, flags);
 		return ret;
@@ -188,9 +195,19 @@
 #endif
 }
 
-int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+static int __crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
 {
 	int i;
+
+	for (i = first_pin; i <= last_pin; i++)
+		pins[port][i] = pinmux_none;
+
+	crisv32_pinmux_set(port);
+	return 0;
+}
+
+int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+{
 	unsigned long flags;
 
 	crisv32_pinmux_init();
@@ -199,11 +216,7 @@
 		return -EINVAL;
 
 	spin_lock_irqsave(&pinmux_lock, flags);
-
-	for (i = first_pin; i <= last_pin; i++)
-		pins[port][i] = pinmux_none;
-
-	crisv32_pinmux_set(port);
+	__crisv32_pinmux_dealloc(port, first_pin, last_pin);
 	spin_unlock_irqrestore(&pinmux_lock, flags);
 
 	return 0;
@@ -226,58 +239,58 @@
 
 	switch (function) {
 	case pinmux_ser1:
-		ret = crisv32_pinmux_dealloc(PORT_C, 4, 7);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 4, 7);
 		hwprot.ser1 = regk_pinmux_no;
 		break;
 	case pinmux_ser2:
-		ret = crisv32_pinmux_dealloc(PORT_C, 8, 11);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 8, 11);
 		hwprot.ser2 = regk_pinmux_no;
 		break;
 	case pinmux_ser3:
-		ret = crisv32_pinmux_dealloc(PORT_C, 12, 15);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 12, 15);
 		hwprot.ser3 = regk_pinmux_no;
 		break;
 	case pinmux_sser0:
-		ret = crisv32_pinmux_dealloc(PORT_C, 0, 3);
-		ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 0, 3);
+		ret |= __crisv32_pinmux_dealloc(PORT_C, 16, 16);
 		hwprot.sser0 = regk_pinmux_no;
 		break;
 	case pinmux_sser1:
-		ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
+		ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
 		hwprot.sser1 = regk_pinmux_no;
 		break;
 	case pinmux_ata0:
-		ret = crisv32_pinmux_dealloc(PORT_D, 5, 7);
-		ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17);
+		ret = __crisv32_pinmux_dealloc(PORT_D, 5, 7);
+		ret |= __crisv32_pinmux_dealloc(PORT_D, 15, 17);
 		hwprot.ata0 = regk_pinmux_no;
 		break;
 	case pinmux_ata1:
-		ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
-		ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17);
+		ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
+		ret |= __crisv32_pinmux_dealloc(PORT_E, 17, 17);
 		hwprot.ata1 = regk_pinmux_no;
 		break;
 	case pinmux_ata2:
-		ret = crisv32_pinmux_dealloc(PORT_C, 11, 15);
-		ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 11, 15);
+		ret |= __crisv32_pinmux_dealloc(PORT_E, 3, 3);
 		hwprot.ata2 = regk_pinmux_no;
 		break;
 	case pinmux_ata3:
-		ret = crisv32_pinmux_dealloc(PORT_C, 8, 10);
-		ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 8, 10);
+		ret |= __crisv32_pinmux_dealloc(PORT_C, 0, 2);
 		hwprot.ata2 = regk_pinmux_no;
 		break;
 	case pinmux_ata:
-		ret = crisv32_pinmux_dealloc(PORT_B, 0, 15);
-		ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15);
+		ret = __crisv32_pinmux_dealloc(PORT_B, 0, 15);
+		ret |= __crisv32_pinmux_dealloc(PORT_D, 8, 15);
 		hwprot.ata = regk_pinmux_no;
 		break;
 	case pinmux_eth1:
-		ret = crisv32_pinmux_dealloc(PORT_E, 0, 17);
+		ret = __crisv32_pinmux_dealloc(PORT_E, 0, 17);
 		hwprot.eth1 = regk_pinmux_no;
 		hwprot.eth1_mgm = regk_pinmux_no;
 		break;
 	case pinmux_timer:
-		ret = crisv32_pinmux_dealloc(PORT_C, 16, 16);
+		ret = __crisv32_pinmux_dealloc(PORT_C, 16, 16);
 		hwprot.timer = regk_pinmux_no;
 		spin_unlock_irqrestore(&pinmux_lock, flags);
 		return ret;
@@ -293,7 +306,8 @@
 	return ret;
 }
 
-void crisv32_pinmux_dump(void)
+#ifdef DEBUG
+static void crisv32_pinmux_dump(void)
 {
 	int i, j;
 
@@ -305,5 +319,5 @@
 			printk(KERN_DEBUG "  Pin %d = %d\n", j, pins[i][j]);
 	}
 }
-
+#endif
 __initcall(crisv32_pinmux_init);
diff --git a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
index c2b3036..09bf0c9 100644
--- a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
+++ b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
@@ -28,11 +28,9 @@
   pinmux_timer
 };
 
-int crisv32_pinmux_init(void);
 int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode);
 int crisv32_pinmux_alloc_fixed(enum fixed_function function);
 int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin);
 int crisv32_pinmux_dealloc_fixed(enum fixed_function function);
-void crisv32_pinmux_dump(void);
 
 #endif
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d5f1248..889f2de 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,8 +1,4 @@
 
-header-y += arch-v10/
-header-y += arch-v32/
-
-
 generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += cputime.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 7d47b36..01f66b8 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,8 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += arch-v10/
-header-y += arch-v32/
+header-y += ../arch-v10/arch/
+header-y += ../arch-v32/arch/
 header-y += auxvec.h
 header-y += bitsperlong.h
 header-y += byteorder.h
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 5868cee..3908b94 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -47,16 +47,16 @@
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Userspace access functions */
-EXPORT_SYMBOL(__copy_user_zeroing);
-EXPORT_SYMBOL(__copy_user);
-
 #undef memcpy
 #undef memset
 extern void * memset(void *, int, __kernel_size_t);
 extern void * memcpy(void *, const void *, __kernel_size_t);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
+#ifdef CONFIG_ETRAX_ARCH_V32
+#undef strcmp
+EXPORT_SYMBOL(strcmp);
+#endif
 
 #ifdef CONFIG_ETRAX_FAST_TIMER
 /* Fast timer functions */
@@ -66,3 +66,4 @@
 EXPORT_SYMBOL(schedule_usleep);
 #endif
 EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index 0ffda73..da4c724 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -14,6 +14,10 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/utsname.h>
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+#endif
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -34,25 +38,24 @@
 
 void (*nmi_handler)(struct pt_regs *);
 
-void
-show_trace(unsigned long *stack)
+void show_trace(unsigned long *stack)
 {
 	unsigned long addr, module_start, module_end;
 	extern char _stext, _etext;
 	int i;
 
-	printk("\nCall Trace: ");
+	pr_err("\nCall Trace: ");
 
 	i = 1;
 	module_start = VMALLOC_START;
 	module_end = VMALLOC_END;
 
-	while (((long)stack & (THREAD_SIZE-1)) != 0) {
+	while (((long)stack & (THREAD_SIZE - 1)) != 0) {
 		if (__get_user(addr, stack)) {
 			/* This message matches "failing address" marked
 			   s390 in ksymoops, so lines containing it will
 			   not be filtered out by ksymoops.  */
-			printk("Failing address 0x%lx\n", (unsigned long)stack);
+			pr_err("Failing address 0x%lx\n", (unsigned long)stack);
 			break;
 		}
 		stack++;
@@ -68,10 +71,14 @@
 		if (((addr >= (unsigned long)&_stext) &&
 		     (addr <= (unsigned long)&_etext)) ||
 		    ((addr >= module_start) && (addr <= module_end))) {
+#ifdef CONFIG_KALLSYMS
+			print_ip_sym(addr);
+#else
 			if (i && ((i % 8) == 0))
-				printk("\n       ");
-			printk("[<%08lx>] ", addr);
+				pr_err("\n       ");
+			pr_err("[<%08lx>] ", addr);
 			i++;
+#endif
 		}
 	}
 }
@@ -111,21 +118,21 @@
 
 	stack = sp;
 
-	printk("\nStack from %08lx:\n       ", (unsigned long)stack);
+	pr_err("\nStack from %08lx:\n       ", (unsigned long)stack);
 	for (i = 0; i < kstack_depth_to_print; i++) {
 		if (((long)stack & (THREAD_SIZE-1)) == 0)
 			break;
 		if (i && ((i % 8) == 0))
-			printk("\n       ");
+			pr_err("\n       ");
 		if (__get_user(addr, stack)) {
 			/* This message matches "failing address" marked
 			   s390 in ksymoops, so lines containing it will
 			   not be filtered out by ksymoops.  */
-			printk("Failing address 0x%lx\n", (unsigned long)stack);
+			pr_err("Failing address 0x%lx\n", (unsigned long)stack);
 			break;
 		}
 		stack++;
-		printk("%08lx ", addr);
+		pr_err("%08lx ", addr);
 	}
 	show_trace(sp);
 }
@@ -139,33 +146,32 @@
 	unsigned long *sp = (unsigned long *)rdusp();
 	int i;
 
-	printk("Stack dump [0x%08lx]:\n", (unsigned long)sp);
+	pr_err("Stack dump [0x%08lx]:\n", (unsigned long)sp);
 	for (i = 0; i < 16; i++)
-		printk("sp + %d: 0x%08lx\n", i*4, sp[i]);
+		pr_err("sp + %d: 0x%08lx\n", i*4, sp[i]);
 	return 0;
 }
 #endif
 
-void
-set_nmi_handler(void (*handler)(struct pt_regs *))
+void set_nmi_handler(void (*handler)(struct pt_regs *))
 {
 	nmi_handler = handler;
 	arch_enable_nmi();
 }
 
 #ifdef CONFIG_DEBUG_NMI_OOPS
-void
-oops_nmi_handler(struct pt_regs *regs)
+void oops_nmi_handler(struct pt_regs *regs)
 {
 	stop_watchdog();
 	oops_in_progress = 1;
-	printk("NMI!\n");
+	pr_err("NMI!\n");
 	show_registers(regs);
 	oops_in_progress = 0;
+	oops_exit();
+	pr_err("\n"); /* Flush mtdoops.  */
 }
 
-static int __init
-oops_nmi_register(void)
+static int __init oops_nmi_register(void)
 {
 	set_nmi_handler(oops_nmi_handler);
 	return 0;
@@ -180,8 +186,7 @@
  * similar to an Oops dump, and if the kernel is configured to be a nice
  * doggy, then halt instead of reboot.
  */
-void
-watchdog_bite_hook(struct pt_regs *regs)
+void watchdog_bite_hook(struct pt_regs *regs)
 {
 #ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
 	local_irq_disable();
@@ -196,8 +201,7 @@
 }
 
 /* This is normally the Oops function. */
-void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
 {
 	if (user_mode(regs))
 		return;
@@ -211,13 +215,17 @@
 	stop_watchdog();
 #endif
 
+	oops_enter();
 	handle_BUG(regs);
 
-	printk("%s: %04lx\n", str, err & 0xffff);
+	pr_err("Linux %s %s\n", utsname()->release, utsname()->version);
+	pr_err("%s: %04lx\n", str, err & 0xffff);
 
 	show_registers(regs);
 
+	oops_exit();
 	oops_in_progress = 0;
+	pr_err("\n"); /* Flush mtdoops.  */
 
 #ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
 	reset_watchdog();
@@ -225,8 +233,7 @@
 	do_exit(SIGSEGV);
 }
 
-void __init
-trap_init(void)
+void __init trap_init(void)
 {
 	/* Nothing needs to be done */
 }
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index c81af5b..1e7fd45 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -11,13 +11,15 @@
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/kcore.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
 
 unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
-void __init
-mem_init(void)
+void __init mem_init(void)
 {
 	BUG_ON(!mem_map);
 
@@ -31,10 +33,36 @@
 	mem_init_print_info(NULL);
 }
 
-/* free the pages occupied by initialization code */
+/* Free a range of init pages. Virtual addresses. */
 
-void 
-free_initmem(void)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		free_page(addr);
+		totalram_pages++;
+	}
+
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+/* Free the pages occupied by initialization code. */
+
+void free_initmem(void)
 {
 	free_initmem_default(-1);
 }
+
+/* Free the pages occupied by initrd code. */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_init_pages("initrd memory",
+	                start,
+	                end);
+}
+#endif
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index f9ca44b..80fdb99 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -76,10 +76,11 @@
  * Must be freed with iounmap.
  */
 
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
 }
+EXPORT_SYMBOL(ioremap_nocache);
 
 void iounmap(volatile void __iomem *addr)
 {
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
index 2635117..69952c1 100644
--- a/arch/hexagon/include/asm/cache.h
+++ b/arch/hexagon/include/asm/cache.h
@@ -1,7 +1,7 @@
 /*
  * Cache definitions for the Hexagon architecture
  *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,8 @@
 #define L1_CACHE_SHIFT		(5)
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+
 #define __cacheline_aligned	__aligned(L1_CACHE_BYTES)
 #define ____cacheline_aligned	__aligned(L1_CACHE_BYTES)
 
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index 49e0896..b86f9f3 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -21,10 +21,7 @@
 #ifndef _ASM_CACHEFLUSH_H
 #define _ASM_CACHEFLUSH_H
 
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <asm/string.h>
-#include <asm-generic/cacheflush.h>
+#include <linux/mm_types.h>
 
 /* Cache flushing:
  *
@@ -41,6 +38,20 @@
 #define LINESIZE	32
 #define LINEBITS	5
 
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_icache_page(vma, pg)		do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
 /*
  * Flush Dcache range through current map.
  */
@@ -49,7 +60,6 @@
 /*
  * Flush Icache range through current map.
  */
-#undef flush_icache_range
 extern void flush_icache_range(unsigned long start, unsigned long end);
 
 /*
@@ -79,19 +89,11 @@
 	/*  generic_ptrace_pokedata doesn't wind up here, does it?  */
 }
 
-#undef copy_to_user_page
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-					     struct page *page,
-					     unsigned long vaddr,
-					     void *dst, void *src, int len)
-{
-	memcpy(dst, src, len);
-	if (vma->vm_flags & VM_EXEC) {
-		flush_icache_range((unsigned long) dst,
-		(unsigned long) dst + len);
-	}
-}
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, void *src, int len);
 
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
 
 extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
 extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 7029899..66f5e9a 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -24,14 +24,9 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <asm/string.h>
-#include <asm/mem-layout.h>
 #include <asm/iomap.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 /*
  * We don't have PCI yet.
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 0e7c1db..6981949 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/bootmem.h>
 #include <linux/mmzone.h>
 #include <linux/mm.h>
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 7858663..110dab1 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -1,7 +1,7 @@
 /*
  * Kernel traps/events for Hexagon processor
  *
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -423,7 +423,7 @@
 			 */
 			info.si_code = TRAP_BRKPT;
 			info.si_addr = (void __user *) pt_elr(regs);
-			send_sig_info(SIGTRAP, &info, current);
+			force_sig_info(SIGTRAP, &info, current);
 		} else {
 #ifdef CONFIG_KGDB
 			kgdb_handle_exception(pt_cause(regs), SIGTRAP,
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 44d8c47..5f268c1 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
 /*
  * Linker script for Hexagon kernel
  *
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -59,7 +59,7 @@
 	INIT_DATA_SECTION(PAGE_SIZE)
 
 	_sdata = .;
-		RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
+		RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
 		RO_DATA_SECTION(PAGE_SIZE)
 	_edata = .;
 
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
index 0c76c80..a7c6d82 100644
--- a/arch/hexagon/mm/cache.c
+++ b/arch/hexagon/mm/cache.c
@@ -127,3 +127,13 @@
 	local_irq_restore(flags);
 	mb();
 }
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, void *src, int len)
+{
+	memcpy(dst, src, len);
+	if (vma->vm_flags & VM_EXEC) {
+		flush_icache_range((unsigned long) dst,
+		(unsigned long) dst + len);
+	}
+}
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index 5905fd5..d27d672 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -20,6 +20,7 @@
 
 #include <linux/io.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 536d13b..074e52b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -20,7 +20,6 @@
 	select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DMA_ATTRS
-	select HAVE_KVM
 	select TTY
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
@@ -232,7 +231,7 @@
 config IA64_HP_SIM
 	bool "Ski-simulator"
 	select SWIOTLB
-	depends on !PM_RUNTIME
+	depends on !PM
 
 endchoice
 
@@ -640,8 +639,6 @@
 
 source "crypto/Kconfig"
 
-source "arch/ia64/kvm/Kconfig"
-
 source "lib/Kconfig"
 
 config IOMMU_HELPER
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 5441b14..970d0bd 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -53,7 +53,6 @@
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
 core-$(CONFIG_IA64_SGI_UV)	+= arch/ia64/uv/
-core-$(CONFIG_KVM) 		+= arch/ia64/kvm/
 
 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
deleted file mode 100644
index 4729752..0000000
--- a/arch/ia64/include/asm/kvm_host.h
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * kvm_host.h: used for kvm module, and hold ia64-specific sections.
- *
- * Copyright (C) 2007, Intel Corporation.
- *
- * Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#ifndef __ASM_KVM_HOST_H
-#define __ASM_KVM_HOST_H
-
-#define KVM_USER_MEM_SLOTS 32
-
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
-
-/* define exit reasons from vmm to kvm*/
-#define EXIT_REASON_VM_PANIC		0
-#define EXIT_REASON_MMIO_INSTRUCTION	1
-#define EXIT_REASON_PAL_CALL		2
-#define EXIT_REASON_SAL_CALL		3
-#define EXIT_REASON_SWITCH_RR6		4
-#define EXIT_REASON_VM_DESTROY		5
-#define EXIT_REASON_EXTERNAL_INTERRUPT	6
-#define EXIT_REASON_IPI			7
-#define EXIT_REASON_PTC_G		8
-#define EXIT_REASON_DEBUG		20
-
-/*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
-#define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000
-#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
-
-/*
- * Define vm_buffer, used by PAL Services, base address.
- * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
- */
-#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
-
-/*
- * kvm guest's data area looks as follow:
- *
- *            +----------------------+	-------	KVM_VM_DATA_SIZE
- *	      |	    vcpu[n]'s data   |	 |     ___________________KVM_STK_OFFSET
- *     	      |			     |	 |    /			  |
- *     	      |	       ..........    |	 |   /vcpu's struct&stack |
- *     	      |	       ..........    |	 |  /---------------------|---- 0
- *	      |	    vcpu[5]'s data   |	 | /	   vpd		  |
- *	      |	    vcpu[4]'s data   |	 |/-----------------------|
- *	      |	    vcpu[3]'s data   |	 /	   vtlb		  |
- *	      |	    vcpu[2]'s data   |	/|------------------------|
- *	      |	    vcpu[1]'s data   |/  |	   vhpt		  |
- *	      |	    vcpu[0]'s data   |____________________________|
- *            +----------------------+	 |
- *	      |	   memory dirty log  |	 |
- *            +----------------------+	 |
- *	      |	   vm's data struct  |	 |
- *            +----------------------+	 |
- *	      |			     |	 |
- *	      |			     |	 |
- *	      |			     |	 |
- *	      |			     |	 |
- *	      |			     |	 |
- *	      |			     |	 |
- *	      |			     |	 |
- *	      |	  vm's p2m table  |	 |
- *	      |			     |	 |
- *            |			     |	 |
- *	      |			     |	 |  |
- * vm's data->|			     |   |  |
- *	      +----------------------+ ------- 0
- * To support large memory, needs to increase the size of p2m.
- * To support more vcpus, needs to ensure it has enough space to
- * hold vcpus' data.
- */
-
-#define KVM_VM_DATA_SHIFT	26
-#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_SIZE)
-
-#define KVM_P2M_BASE		KVM_VM_DATA_BASE
-#define KVM_P2M_SIZE		(__IA64_UL_CONST(24) << 20)
-
-#define VHPT_SHIFT		16
-#define VHPT_SIZE		(__IA64_UL_CONST(1) << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES	(__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
-
-#define VTLB_SHIFT		16
-#define VTLB_SIZE		(__IA64_UL_CONST(1) << VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES	(1UL << (VHPT_SHIFT-5))
-
-#define VPD_SHIFT		16
-#define VPD_SIZE		(__IA64_UL_CONST(1) << VPD_SHIFT)
-
-#define VCPU_STRUCT_SHIFT	16
-#define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
-
-/*
- * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
- */
-#define KVM_STK_SHIFT		16
-#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
-
-#define KVM_VM_STRUCT_SHIFT	19
-#define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
-
-#define KVM_MEM_DIRY_LOG_SHIFT	19
-#define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
-
-#ifndef __ASSEMBLY__
-
-/*Define the max vcpus and memory for Guests.*/
-#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
-			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
-#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
-
-#define VMM_LOG_LEN 256
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/page.h>
-
-struct kvm_vcpu_data {
-	char vcpu_vhpt[VHPT_SIZE];
-	char vcpu_vtlb[VTLB_SIZE];
-	char vcpu_vpd[VPD_SIZE];
-	char vcpu_struct[VCPU_STRUCT_SIZE];
-};
-
-struct kvm_vm_data {
-	char kvm_p2m[KVM_P2M_SIZE];
-	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
-	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
-	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
-};
-
-#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, vcpu_data[n]))
-#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_vm_struct))
-#define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
-
-#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
-#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
-#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
-#define VCPU_STRUCT_BASE(n)	(VCPU_BASE(n) + \
-				offsetof(struct kvm_vcpu_data, vcpu_struct))
-
-/*IO section definitions*/
-#define IOREQ_READ      1
-#define IOREQ_WRITE     0
-
-#define STATE_IOREQ_NONE        0
-#define STATE_IOREQ_READY       1
-#define STATE_IOREQ_INPROCESS   2
-#define STATE_IORESP_READY      3
-
-/*Guest Physical address layout.*/
-#define GPFN_MEM        (0UL << 60) /* Guest pfn is normal mem */
-#define GPFN_FRAME_BUFFER   (1UL << 60) /* VGA framebuffer */
-#define GPFN_LOW_MMIO       (2UL << 60) /* Low MMIO range */
-#define GPFN_PIB        (3UL << 60) /* PIB base */
-#define GPFN_IOSAPIC        (4UL << 60) /* IOSAPIC base */
-#define GPFN_LEGACY_IO      (5UL << 60) /* Legacy I/O base */
-#define GPFN_GFW        (6UL << 60) /* Guest Firmware */
-#define GPFN_PHYS_MMIO      (7UL << 60) /* Directed MMIO Range */
-
-#define GPFN_IO_MASK        (7UL << 60) /* Guest pfn is I/O type */
-#define GPFN_INV_MASK       (1UL << 63) /* Guest pfn is invalid */
-#define INVALID_MFN       (~0UL)
-#define MEM_G   (1UL << 30)
-#define MEM_M   (1UL << 20)
-#define MMIO_START       (3 * MEM_G)
-#define MMIO_SIZE        (512 * MEM_M)
-#define VGA_IO_START     0xA0000UL
-#define VGA_IO_SIZE      0x20000
-#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
-#define LEGACY_IO_SIZE   (64 * MEM_M)
-#define IO_SAPIC_START   0xfec00000UL
-#define IO_SAPIC_SIZE    0x100000
-#define PIB_START 0xfee00000UL
-#define PIB_SIZE 0x200000
-#define GFW_START        (4 * MEM_G - 16 * MEM_M)
-#define GFW_SIZE         (16 * MEM_M)
-
-/*Deliver mode, defined for ioapic.c*/
-#define dest_Fixed IOSAPIC_FIXED
-#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
-
-#define NMI_VECTOR      		2
-#define ExtINT_VECTOR       		0
-#define NULL_VECTOR     		(-1)
-#define IA64_SPURIOUS_INT_VECTOR    	0x0f
-
-#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
-
-/*
- *Delivery mode
- */
-#define SAPIC_DELIV_SHIFT      8
-#define SAPIC_FIXED            0x0
-#define SAPIC_LOWEST_PRIORITY  0x1
-#define SAPIC_PMI              0x2
-#define SAPIC_NMI              0x4
-#define SAPIC_INIT             0x5
-#define SAPIC_EXTINT           0x7
-
-/*
- * vcpu->requests bit members for arch
- */
-#define KVM_REQ_PTC_G		32
-#define KVM_REQ_RESUME		33
-
-struct kvm_mmio_req {
-	uint64_t addr;          /*  physical address		*/
-	uint64_t size;          /*  size in bytes		*/
-	uint64_t data;          /*  data (or paddr of data)     */
-	uint8_t state:4;
-	uint8_t dir:1;          /*  1=read, 0=write             */
-};
-
-/*Pal data struct */
-struct kvm_pal_call{
-	/*In area*/
-	uint64_t gr28;
-	uint64_t gr29;
-	uint64_t gr30;
-	uint64_t gr31;
-	/*Out area*/
-	struct ia64_pal_retval ret;
-};
-
-/* Sal data structure */
-struct kvm_sal_call{
-	/*In area*/
-	uint64_t in0;
-	uint64_t in1;
-	uint64_t in2;
-	uint64_t in3;
-	uint64_t in4;
-	uint64_t in5;
-	uint64_t in6;
-	uint64_t in7;
-	struct sal_ret_values ret;
-};
-
-/*Guest change rr6*/
-struct kvm_switch_rr6 {
-	uint64_t old_rr;
-	uint64_t new_rr;
-};
-
-union ia64_ipi_a{
-	unsigned long val;
-	struct {
-		unsigned long rv  : 3;
-		unsigned long ir  : 1;
-		unsigned long eid : 8;
-		unsigned long id  : 8;
-		unsigned long ib_base : 44;
-	};
-};
-
-union ia64_ipi_d {
-	unsigned long val;
-	struct {
-		unsigned long vector : 8;
-		unsigned long dm  : 3;
-		unsigned long ig  : 53;
-	};
-};
-
-/*ipi check exit data*/
-struct kvm_ipi_data{
-	union ia64_ipi_a addr;
-	union ia64_ipi_d data;
-};
-
-/*global purge data*/
-struct kvm_ptc_g {
-	unsigned long vaddr;
-	unsigned long rr;
-	unsigned long ps;
-	struct kvm_vcpu *vcpu;
-};
-
-/*Exit control data */
-struct exit_ctl_data{
-	uint32_t exit_reason;
-	uint32_t vm_status;
-	union {
-		struct kvm_mmio_req	ioreq;
-		struct kvm_pal_call	pal_data;
-		struct kvm_sal_call	sal_data;
-		struct kvm_switch_rr6	rr_data;
-		struct kvm_ipi_data	ipi_data;
-		struct kvm_ptc_g	ptc_g_data;
-	} u;
-};
-
-union pte_flags {
-	unsigned long val;
-	struct {
-		unsigned long p    :  1; /*0      */
-		unsigned long      :  1; /* 1     */
-		unsigned long ma   :  3; /* 2-4   */
-		unsigned long a    :  1; /* 5     */
-		unsigned long d    :  1; /* 6     */
-		unsigned long pl   :  2; /* 7-8   */
-		unsigned long ar   :  3; /* 9-11  */
-		unsigned long ppn  : 38; /* 12-49 */
-		unsigned long      :  2; /* 50-51 */
-		unsigned long ed   :  1; /* 52    */
-	};
-};
-
-union ia64_pta {
-	unsigned long val;
-	struct {
-		unsigned long ve : 1;
-		unsigned long reserved0 : 1;
-		unsigned long size : 6;
-		unsigned long vf : 1;
-		unsigned long reserved1 : 6;
-		unsigned long base : 49;
-	};
-};
-
-struct thash_cb {
-	/* THASH base information */
-	struct thash_data	*hash; /* hash table pointer */
-	union ia64_pta		pta;
-	int           num;
-};
-
-struct kvm_vcpu_stat {
-	u32 halt_wakeup;
-};
-
-struct kvm_vcpu_arch {
-	int launched;
-	int last_exit;
-	int last_run_cpu;
-	int vmm_tr_slot;
-	int vm_tr_slot;
-	int sn_rtc_tr_slot;
-
-#define KVM_MP_STATE_RUNNABLE          0
-#define KVM_MP_STATE_UNINITIALIZED     1
-#define KVM_MP_STATE_INIT_RECEIVED     2
-#define KVM_MP_STATE_HALTED            3
-	int mp_state;
-
-#define MAX_PTC_G_NUM			3
-	int ptc_g_count;
-	struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
-
-	/*halt timer to wake up sleepy vcpus*/
-	struct hrtimer hlt_timer;
-	long ht_active;
-
-	struct kvm_lapic *apic;    /* kernel irqchip context */
-	struct vpd *vpd;
-
-	/* Exit data for vmm_transition*/
-	struct exit_ctl_data exit_data;
-
-	cpumask_t cache_coherent_map;
-
-	unsigned long vmm_rr;
-	unsigned long host_rr6;
-	unsigned long psbits[8];
-	unsigned long cr_iipa;
-	unsigned long cr_isr;
-	unsigned long vsa_base;
-	unsigned long dirty_log_lock_pa;
-	unsigned long __gp;
-	/* TR and TC.  */
-	struct thash_data itrs[NITRS];
-	struct thash_data dtrs[NDTRS];
-	/* Bit is set if there is a tr/tc for the region.  */
-	unsigned char itr_regions;
-	unsigned char dtr_regions;
-	unsigned char tc_regions;
-	/* purge all */
-	unsigned long ptce_base;
-	unsigned long ptce_count[2];
-	unsigned long ptce_stride[2];
-	/* itc/itm */
-	unsigned long last_itc;
-	long itc_offset;
-	unsigned long itc_check;
-	unsigned long timer_check;
-	unsigned int timer_pending;
-	unsigned int timer_fired;
-
-	unsigned long vrr[8];
-	unsigned long ibr[8];
-	unsigned long dbr[8];
-	unsigned long insvc[4];		/* Interrupt in service.  */
-	unsigned long xtp;
-
-	unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_saved_rr0; /* from kvm_arch          */
-	unsigned long metaphysical_saved_rr4; /* from kvm_arch          */
-	unsigned long fp_psr;       /*used for lazy float register */
-	unsigned long saved_gp;
-	/*for phycial  emulation */
-	int mode_flags;
-	struct thash_cb vtlb;
-	struct thash_cb vhpt;
-	char irq_check;
-	char irq_new_pending;
-
-	unsigned long opcode;
-	unsigned long cause;
-	char log_buf[VMM_LOG_LEN];
-	union context host;
-	union context guest;
-
-	char mmio_data[8];
-};
-
-struct kvm_vm_stat {
-	u64 remote_tlb_flush;
-};
-
-struct kvm_sal_data {
-	unsigned long boot_ip;
-	unsigned long boot_gp;
-};
-
-struct kvm_arch_memory_slot {
-};
-
-struct kvm_arch {
-	spinlock_t dirty_log_lock;
-
-	unsigned long	vm_base;
-	unsigned long	metaphysical_rr0;
-	unsigned long	metaphysical_rr4;
-	unsigned long	vmm_init_rr;
-
-	int		is_sn2;
-
-	struct kvm_ioapic *vioapic;
-	struct kvm_vm_stat stat;
-	struct kvm_sal_data rdv_sal_data;
-
-	struct list_head assigned_dev_head;
-	struct iommu_domain *iommu_domain;
-	bool iommu_noncoherent;
-
-	unsigned long irq_sources_bitmap;
-	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
-};
-
-union cpuid3_t {
-	u64 value;
-	struct {
-		u64 number : 8;
-		u64 revision : 8;
-		u64 model : 8;
-		u64 family : 8;
-		u64 archrev : 8;
-		u64 rv : 24;
-	};
-};
-
-struct kvm_pt_regs {
-	/* The following registers are saved by SAVE_MIN: */
-	unsigned long b6;  /* scratch */
-	unsigned long b7;  /* scratch */
-
-	unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
-	unsigned long ar_ssd; /* reserved for future use (scratch) */
-
-	unsigned long r8;  /* scratch (return value register 0) */
-	unsigned long r9;  /* scratch (return value register 1) */
-	unsigned long r10; /* scratch (return value register 2) */
-	unsigned long r11; /* scratch (return value register 3) */
-
-	unsigned long cr_ipsr; /* interrupted task's psr */
-	unsigned long cr_iip;  /* interrupted task's instruction pointer */
-	unsigned long cr_ifs;  /* interrupted task's function state */
-
-	unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
-	unsigned long ar_pfs;  /* prev function state  */
-	unsigned long ar_rsc;  /* RSE configuration */
-	/* The following two are valid only if cr_ipsr.cpl > 0: */
-	unsigned long ar_rnat;  /* RSE NaT */
-	unsigned long ar_bspstore; /* RSE bspstore */
-
-	unsigned long pr;  /* 64 predicate registers (1 bit each) */
-	unsigned long b0;  /* return pointer (bp) */
-	unsigned long loadrs;  /* size of dirty partition << 16 */
-
-	unsigned long r1;  /* the gp pointer */
-	unsigned long r12; /* interrupted task's memory stack pointer */
-	unsigned long r13; /* thread pointer */
-
-	unsigned long ar_fpsr;  /* floating point status (preserved) */
-	unsigned long r15;  /* scratch */
-
-	/* The remaining registers are NOT saved for system calls.  */
-	unsigned long r14;  /* scratch */
-	unsigned long r2;  /* scratch */
-	unsigned long r3;  /* scratch */
-	unsigned long r16;  /* scratch */
-	unsigned long r17;  /* scratch */
-	unsigned long r18;  /* scratch */
-	unsigned long r19;  /* scratch */
-	unsigned long r20;  /* scratch */
-	unsigned long r21;  /* scratch */
-	unsigned long r22;  /* scratch */
-	unsigned long r23;  /* scratch */
-	unsigned long r24;  /* scratch */
-	unsigned long r25;  /* scratch */
-	unsigned long r26;  /* scratch */
-	unsigned long r27;  /* scratch */
-	unsigned long r28;  /* scratch */
-	unsigned long r29;  /* scratch */
-	unsigned long r30;  /* scratch */
-	unsigned long r31;  /* scratch */
-	unsigned long ar_ccv;  /* compare/exchange value (scratch) */
-
-	/*
-	 * Floating point registers that the kernel considers scratch:
-	 */
-	struct ia64_fpreg f6;  /* scratch */
-	struct ia64_fpreg f7;  /* scratch */
-	struct ia64_fpreg f8;  /* scratch */
-	struct ia64_fpreg f9;  /* scratch */
-	struct ia64_fpreg f10;  /* scratch */
-	struct ia64_fpreg f11;  /* scratch */
-
-	unsigned long r4;  /* preserved */
-	unsigned long r5;  /* preserved */
-	unsigned long r6;  /* preserved */
-	unsigned long r7;  /* preserved */
-	unsigned long eml_unat;    /* used for emulating instruction */
-	unsigned long pad0;     /* alignment pad */
-};
-
-static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
-{
-	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
-}
-
-typedef int kvm_vmm_entry(void);
-typedef void kvm_tramp_entry(union context *host, union context *guest);
-
-struct kvm_vmm_info{
-	struct module	*module;
-	kvm_vmm_entry 	*vmm_entry;
-	kvm_tramp_entry *tramp_entry;
-	unsigned long 	vmm_ivt;
-	unsigned long	patch_mov_ar;
-	unsigned long	patch_mov_ar_sn2;
-};
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-void kvm_sal_emul(struct kvm_vcpu *vcpu);
-
-#define __KVM_HAVE_ARCH_VM_ALLOC 1
-struct kvm *kvm_arch_alloc_vm(void);
-void kvm_arch_free_vm(struct kvm *kvm);
-
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_free_memslot(struct kvm *kvm,
-		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
-static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
-		const struct kvm_memory_slot *old,
-		enum kvm_mr_change change) {}
-static inline void kvm_arch_hardware_unsetup(void) {}
-
-#endif /* __ASSEMBLY__*/
-
-#endif
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 14aa1c5..0ec484d 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -35,8 +35,8 @@
 
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
- * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
- * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
  * more efficient.
  */
 #define __ia64_per_cpu_var(var) (*({					\
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
deleted file mode 100644
index 42b233b..0000000
--- a/arch/ia64/include/asm/pvclock-abi.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * same structure to x86's
- * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic.
- * For now, define same duplicated definitions.
- */
-
-#ifndef _ASM_IA64__PVCLOCK_ABI_H
-#define _ASM_IA64__PVCLOCK_ABI_H
-#ifndef __ASSEMBLY__
-
-/*
- * These structs MUST NOT be changed.
- * They are the ABI between hypervisor and guest OS.
- * KVM is using this.
- *
- * pvclock_vcpu_time_info holds the system time and the tsc timestamp
- * of the last update. So the guest can use the tsc delta to get a
- * more precise system time.  There is one per virtual cpu.
- *
- * pvclock_wall_clock references the point in time when the system
- * time was zero (usually boot time), thus the guest calculates the
- * current wall clock by adding the system time.
- *
- * Protocol for the "version" fields is: hypervisor raises it (making
- * it uneven) before it starts updating the fields and raises it again
- * (making it even) when it is done.  Thus the guest can make sure the
- * time values it got are consistent by checking the version before
- * and after reading them.
- */
-
-struct pvclock_vcpu_time_info {
-	u32   version;
-	u32   pad0;
-	u64   tsc_timestamp;
-	u64   system_time;
-	u32   tsc_to_system_mul;
-	s8    tsc_shift;
-	u8    pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct pvclock_wall_clock {
-	u32   version;
-	u32   sec;
-	u32   nsec;
-} __attribute__((__packed__));
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_IA64__PVCLOCK_ABI_H */
diff --git a/arch/ia64/include/uapi/asm/kvm.h b/arch/ia64/include/uapi/asm/kvm.h
deleted file mode 100644
index 99503c2..0000000
--- a/arch/ia64/include/uapi/asm/kvm.h
+++ /dev/null
@@ -1,268 +0,0 @@
-#ifndef __ASM_IA64_KVM_H
-#define __ASM_IA64_KVM_H
-
-/*
- * kvm structure definitions  for ia64
- *
- * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/* Select x86 specific features in <linux/kvm.h> */
-#define __KVM_HAVE_IOAPIC
-#define __KVM_HAVE_IRQ_LINE
-
-/* Architectural interrupt line count. */
-#define KVM_NR_INTERRUPTS 256
-
-#define KVM_IOAPIC_NUM_PINS  48
-
-struct kvm_ioapic_state {
-	__u64 base_address;
-	__u32 ioregsel;
-	__u32 id;
-	__u32 irr;
-	__u32 pad;
-	union {
-		__u64 bits;
-		struct {
-			__u8 vector;
-			__u8 delivery_mode:3;
-			__u8 dest_mode:1;
-			__u8 delivery_status:1;
-			__u8 polarity:1;
-			__u8 remote_irr:1;
-			__u8 trig_mode:1;
-			__u8 mask:1;
-			__u8 reserve:7;
-			__u8 reserved[4];
-			__u8 dest_id;
-		} fields;
-	} redirtbl[KVM_IOAPIC_NUM_PINS];
-};
-
-#define KVM_IRQCHIP_PIC_MASTER   0
-#define KVM_IRQCHIP_PIC_SLAVE    1
-#define KVM_IRQCHIP_IOAPIC       2
-#define KVM_NR_IRQCHIPS          3
-
-#define KVM_CONTEXT_SIZE	8*1024
-
-struct kvm_fpreg {
-	union {
-		unsigned long bits[2];
-		long double __dummy;	/* force 16-byte alignment */
-	} u;
-};
-
-union context {
-	/* 8K size */
-	char	dummy[KVM_CONTEXT_SIZE];
-	struct {
-		unsigned long       psr;
-		unsigned long       pr;
-		unsigned long       caller_unat;
-		unsigned long       pad;
-		unsigned long       gr[32];
-		unsigned long       ar[128];
-		unsigned long       br[8];
-		unsigned long       cr[128];
-		unsigned long       rr[8];
-		unsigned long       ibr[8];
-		unsigned long       dbr[8];
-		unsigned long       pkr[8];
-		struct kvm_fpreg   fr[128];
-	};
-};
-
-struct thash_data {
-	union {
-		struct {
-			unsigned long p    :  1; /* 0 */
-			unsigned long rv1  :  1; /* 1 */
-			unsigned long ma   :  3; /* 2-4 */
-			unsigned long a    :  1; /* 5 */
-			unsigned long d    :  1; /* 6 */
-			unsigned long pl   :  2; /* 7-8 */
-			unsigned long ar   :  3; /* 9-11 */
-			unsigned long ppn  : 38; /* 12-49 */
-			unsigned long rv2  :  2; /* 50-51 */
-			unsigned long ed   :  1; /* 52 */
-			unsigned long ig1  : 11; /* 53-63 */
-		};
-		struct {
-			unsigned long __rv1 : 53;     /* 0-52 */
-			unsigned long contiguous : 1; /*53 */
-			unsigned long tc : 1;         /* 54 TR or TC */
-			unsigned long cl : 1;
-			/* 55 I side or D side cache line */
-			unsigned long len  :  4;      /* 56-59 */
-			unsigned long io  : 1;	/* 60 entry is for io or not */
-			unsigned long nomap : 1;
-			/* 61 entry cann't be inserted into machine TLB.*/
-			unsigned long checked : 1;
-			/* 62 for VTLB/VHPT sanity check */
-			unsigned long invalid : 1;
-			/* 63 invalid entry */
-		};
-		unsigned long page_flags;
-	};                  /* same for VHPT and TLB */
-
-	union {
-		struct {
-			unsigned long rv3  :  2;
-			unsigned long ps   :  6;
-			unsigned long key  : 24;
-			unsigned long rv4  : 32;
-		};
-		unsigned long itir;
-	};
-	union {
-		struct {
-			unsigned long ig2  :  12;
-			unsigned long vpn  :  49;
-			unsigned long vrn  :   3;
-		};
-		unsigned long ifa;
-		unsigned long vadr;
-		struct {
-			unsigned long tag  :  63;
-			unsigned long ti   :  1;
-		};
-		unsigned long etag;
-	};
-	union {
-		struct thash_data *next;
-		unsigned long rid;
-		unsigned long gpaddr;
-	};
-};
-
-#define	NITRS	8
-#define NDTRS	8
-
-struct saved_vpd {
-	unsigned long  vhpi;
-	unsigned long  vgr[16];
-	unsigned long  vbgr[16];
-	unsigned long  vnat;
-	unsigned long  vbnat;
-	unsigned long  vcpuid[5];
-	unsigned long  vpsr;
-	unsigned long  vpr;
-	union {
-		unsigned long  vcr[128];
-		struct {
-			unsigned long dcr;
-			unsigned long itm;
-			unsigned long iva;
-			unsigned long rsv1[5];
-			unsigned long pta;
-			unsigned long rsv2[7];
-			unsigned long ipsr;
-			unsigned long isr;
-			unsigned long rsv3;
-			unsigned long iip;
-			unsigned long ifa;
-			unsigned long itir;
-			unsigned long iipa;
-			unsigned long ifs;
-			unsigned long iim;
-			unsigned long iha;
-			unsigned long rsv4[38];
-			unsigned long lid;
-			unsigned long ivr;
-			unsigned long tpr;
-			unsigned long eoi;
-			unsigned long irr[4];
-			unsigned long itv;
-			unsigned long pmv;
-			unsigned long cmcv;
-			unsigned long rsv5[5];
-			unsigned long lrr0;
-			unsigned long lrr1;
-			unsigned long rsv6[46];
-		};
-	};
-};
-
-struct kvm_regs {
-	struct saved_vpd vpd;
-	/*Arch-regs*/
-	int mp_state;
-	unsigned long vmm_rr;
-	/* TR and TC.  */
-	struct thash_data itrs[NITRS];
-	struct thash_data dtrs[NDTRS];
-	/* Bit is set if there is a tr/tc for the region.  */
-	unsigned char itr_regions;
-	unsigned char dtr_regions;
-	unsigned char tc_regions;
-
-	char irq_check;
-	unsigned long saved_itc;
-	unsigned long itc_check;
-	unsigned long timer_check;
-	unsigned long timer_pending;
-	unsigned long last_itc;
-
-	unsigned long vrr[8];
-	unsigned long ibr[8];
-	unsigned long dbr[8];
-	unsigned long insvc[4];		/* Interrupt in service.  */
-	unsigned long xtp;
-
-	unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
-	unsigned long metaphysical_saved_rr0; /* from kvm_arch          */
-	unsigned long metaphysical_saved_rr4; /* from kvm_arch          */
-	unsigned long fp_psr;       /*used for lazy float register */
-	unsigned long saved_gp;
-	/*for phycial  emulation */
-
-	union context saved_guest;
-
-	unsigned long reserved[64];	/* for future use */
-};
-
-struct kvm_sregs {
-};
-
-struct kvm_fpu {
-};
-
-#define KVM_IA64_VCPU_STACK_SHIFT	16
-#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
-
-struct kvm_ia64_vcpu_stack {
-	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
-};
-
-struct kvm_debug_exit_arch {
-};
-
-/* for KVM_SET_GUEST_DEBUG */
-struct kvm_guest_debug_arch {
-};
-
-/* definition of registers in kvm_run */
-struct kvm_sync_regs {
-};
-
-#endif
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
deleted file mode 100644
index 3d50ea9..0000000
--- a/arch/ia64/kvm/Kconfig
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# KVM configuration
-#
-
-source "virt/kvm/Kconfig"
-
-menuconfig VIRTUALIZATION
-	bool "Virtualization"
-	depends on HAVE_KVM || IA64
-	default y
-	---help---
-	  Say Y here to get to see options for using your Linux host to run other
-	  operating systems inside virtual machines (guests).
-	  This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support"
-	depends on BROKEN
-	depends on HAVE_KVM && MODULES
-	depends on BROKEN
-	select PREEMPT_NOTIFIERS
-	select ANON_INODES
-	select HAVE_KVM_IRQCHIP
-	select HAVE_KVM_IRQFD
-	select HAVE_KVM_IRQ_ROUTING
-	select KVM_APIC_ARCHITECTURE
-	select KVM_MMIO
-	---help---
-	  Support hosting fully virtualized guest machines using hardware
-	  virtualization extensions.  You will need a fairly recent
-	  processor equipped with virtualization extensions. You will also
-	  need to select one or more of the processor modules below.
-
-	  This module provides access to the hardware capabilities through
-	  a character device node named /dev/kvm.
-
-	  To compile this as a module, choose M here: the module
-	  will be called kvm.
-
-	  If unsure, say N.
-
-config KVM_INTEL
-	tristate "KVM for Intel Itanium 2 processors support"
-	depends on KVM && m
-	---help---
-	  Provides support for KVM on Itanium 2 processors equipped with the VT
-	  extensions.
-
-config KVM_DEVICE_ASSIGNMENT
-	bool "KVM legacy PCI device assignment support"
-	depends on KVM && PCI && IOMMU_API
-	default y
-	---help---
-	  Provide support for legacy PCI device assignment through KVM.  The
-	  kernel now also supports a full featured userspace device driver
-	  framework through VFIO, which supersedes much of this support.
-
-	  If unsure, say Y.
-
-source drivers/vhost/Kconfig
-
-endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
deleted file mode 100644
index 18e45ec..0000000
--- a/arch/ia64/kvm/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-#This Make file is to generate asm-offsets.h and build source.
-#
-
-#Generate asm-offsets.h for vmm module build
-offsets-file := asm-offsets.h
-
-always  := $(offsets-file)
-targets := $(offsets-file)
-targets += arch/ia64/kvm/asm-offsets.s
-
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
-	"/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN     $@
-define cmd_offsets
-	(set -e; \
-	 echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
-	 echo "#define __ASM_KVM_OFFSETS_H__"; \
-	 echo "/*"; \
-	 echo " * DO NOT MODIFY."; \
-	 echo " *"; \
-	 echo " * This file was generated by Makefile"; \
-	 echo " *"; \
-	 echo " */"; \
-	 echo ""; \
-	 sed -ne $(sed-y) $<; \
-	 echo ""; \
-	 echo "#endif" ) > $@
-endef
-
-# We use internal rules to avoid the "is up to date" message from make
-arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
-			$(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
-			$(wildcard $(srctree)/include/linux/*.h)
-	$(call if_changed_dep,cc_s_c)
-
-$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
-	$(call cmd,offsets)
-
-FORCE : $(obj)/$(offsets-file)
-
-#
-# Makefile for Kernel-based Virtual Machine module
-#
-
-ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
-KVM := ../../../virt/kvm
-
-common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
-		$(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
-
-ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
-endif
-
-kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
-obj-$(CONFIG_KVM) += kvm.o
-
-CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
-kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
-	vtlb.o process.o kvm_lib.o
-#Add link memcpy and memset to avoid possible structure assignment error
-kvm-intel-objs += memcpy.o memset.o
-obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
deleted file mode 100644
index 9324c87..0000000
--- a/arch/ia64/kvm/asm-offsets.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * asm-offsets.c Generate definitions needed by assembly language modules.
- * This code generates raw asm output which is post-processed
- * to extract and format the required data.
- *
- * Anthony Xu    <anthony.xu@intel.com>
- * Xiantao Zhang <xiantao.zhang@intel.com>
- * Copyright (c) 2007 Intel Corporation  KVM support.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kbuild.h>
-
-#include "vcpu.h"
-
-void foo(void)
-{
-	DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
-	DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
-
-	BLANK();
-
-	DEFINE(VMM_VCPU_META_RR0_OFFSET,
-			offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
-	DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
-			offsetof(struct kvm_vcpu,
-				arch.metaphysical_saved_rr0));
-	DEFINE(VMM_VCPU_VRR0_OFFSET,
-			offsetof(struct kvm_vcpu, arch.vrr[0]));
-	DEFINE(VMM_VPD_IRR0_OFFSET,
-			offsetof(struct vpd, irr[0]));
-	DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
-			offsetof(struct kvm_vcpu, arch.itc_check));
-	DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
-			offsetof(struct kvm_vcpu, arch.irq_check));
-	DEFINE(VMM_VPD_VHPI_OFFSET,
-			offsetof(struct vpd, vhpi));
-	DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
-			offsetof(struct kvm_vcpu, arch.vsa_base));
-	DEFINE(VMM_VCPU_VPD_OFFSET,
-			offsetof(struct kvm_vcpu, arch.vpd));
-	DEFINE(VMM_VCPU_IRQ_CHECK,
-			offsetof(struct kvm_vcpu, arch.irq_check));
-	DEFINE(VMM_VCPU_TIMER_PENDING,
-			offsetof(struct kvm_vcpu, arch.timer_pending));
-	DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
-			offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
-	DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
-			offsetof(struct kvm_vcpu, arch.mode_flags));
-	DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
-			offsetof(struct kvm_vcpu, arch.itc_offset));
-	DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
-			offsetof(struct kvm_vcpu, arch.last_itc));
-	DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
-			offsetof(struct kvm_vcpu, arch.saved_gp));
-
-	BLANK();
-
-	DEFINE(VMM_PT_REGS_B6_OFFSET,
-				offsetof(struct kvm_pt_regs, b6));
-	DEFINE(VMM_PT_REGS_B7_OFFSET,
-				offsetof(struct kvm_pt_regs, b7));
-	DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_csd));
-	DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_ssd));
-	DEFINE(VMM_PT_REGS_R8_OFFSET,
-				offsetof(struct kvm_pt_regs, r8));
-	DEFINE(VMM_PT_REGS_R9_OFFSET,
-				offsetof(struct kvm_pt_regs, r9));
-	DEFINE(VMM_PT_REGS_R10_OFFSET,
-				offsetof(struct kvm_pt_regs, r10));
-	DEFINE(VMM_PT_REGS_R11_OFFSET,
-				offsetof(struct kvm_pt_regs, r11));
-	DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
-				offsetof(struct kvm_pt_regs, cr_ipsr));
-	DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
-				offsetof(struct kvm_pt_regs, cr_iip));
-	DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
-				offsetof(struct kvm_pt_regs, cr_ifs));
-	DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_unat));
-	DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_pfs));
-	DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_rsc));
-	DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_rnat));
-
-	DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_bspstore));
-	DEFINE(VMM_PT_REGS_PR_OFFSET,
-				offsetof(struct kvm_pt_regs, pr));
-	DEFINE(VMM_PT_REGS_B0_OFFSET,
-				offsetof(struct kvm_pt_regs, b0));
-	DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
-				offsetof(struct kvm_pt_regs, loadrs));
-	DEFINE(VMM_PT_REGS_R1_OFFSET,
-				offsetof(struct kvm_pt_regs, r1));
-	DEFINE(VMM_PT_REGS_R12_OFFSET,
-				offsetof(struct kvm_pt_regs, r12));
-	DEFINE(VMM_PT_REGS_R13_OFFSET,
-				offsetof(struct kvm_pt_regs, r13));
-	DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_fpsr));
-	DEFINE(VMM_PT_REGS_R15_OFFSET,
-				offsetof(struct kvm_pt_regs, r15));
-	DEFINE(VMM_PT_REGS_R14_OFFSET,
-				offsetof(struct kvm_pt_regs, r14));
-	DEFINE(VMM_PT_REGS_R2_OFFSET,
-				offsetof(struct kvm_pt_regs, r2));
-	DEFINE(VMM_PT_REGS_R3_OFFSET,
-				offsetof(struct kvm_pt_regs, r3));
-	DEFINE(VMM_PT_REGS_R16_OFFSET,
-				offsetof(struct kvm_pt_regs, r16));
-	DEFINE(VMM_PT_REGS_R17_OFFSET,
-				offsetof(struct kvm_pt_regs, r17));
-	DEFINE(VMM_PT_REGS_R18_OFFSET,
-				offsetof(struct kvm_pt_regs, r18));
-	DEFINE(VMM_PT_REGS_R19_OFFSET,
-				offsetof(struct kvm_pt_regs, r19));
-	DEFINE(VMM_PT_REGS_R20_OFFSET,
-				offsetof(struct kvm_pt_regs, r20));
-	DEFINE(VMM_PT_REGS_R21_OFFSET,
-				offsetof(struct kvm_pt_regs, r21));
-	DEFINE(VMM_PT_REGS_R22_OFFSET,
-				offsetof(struct kvm_pt_regs, r22));
-	DEFINE(VMM_PT_REGS_R23_OFFSET,
-				offsetof(struct kvm_pt_regs, r23));
-	DEFINE(VMM_PT_REGS_R24_OFFSET,
-				offsetof(struct kvm_pt_regs, r24));
-	DEFINE(VMM_PT_REGS_R25_OFFSET,
-				offsetof(struct kvm_pt_regs, r25));
-	DEFINE(VMM_PT_REGS_R26_OFFSET,
-				offsetof(struct kvm_pt_regs, r26));
-	DEFINE(VMM_PT_REGS_R27_OFFSET,
-				offsetof(struct kvm_pt_regs, r27));
-	DEFINE(VMM_PT_REGS_R28_OFFSET,
-				offsetof(struct kvm_pt_regs, r28));
-	DEFINE(VMM_PT_REGS_R29_OFFSET,
-				offsetof(struct kvm_pt_regs, r29));
-	DEFINE(VMM_PT_REGS_R30_OFFSET,
-				offsetof(struct kvm_pt_regs, r30));
-	DEFINE(VMM_PT_REGS_R31_OFFSET,
-				offsetof(struct kvm_pt_regs, r31));
-	DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
-				offsetof(struct kvm_pt_regs, ar_ccv));
-	DEFINE(VMM_PT_REGS_F6_OFFSET,
-				offsetof(struct kvm_pt_regs, f6));
-	DEFINE(VMM_PT_REGS_F7_OFFSET,
-				offsetof(struct kvm_pt_regs, f7));
-	DEFINE(VMM_PT_REGS_F8_OFFSET,
-				offsetof(struct kvm_pt_regs, f8));
-	DEFINE(VMM_PT_REGS_F9_OFFSET,
-				offsetof(struct kvm_pt_regs, f9));
-	DEFINE(VMM_PT_REGS_F10_OFFSET,
-				offsetof(struct kvm_pt_regs, f10));
-	DEFINE(VMM_PT_REGS_F11_OFFSET,
-				offsetof(struct kvm_pt_regs, f11));
-	DEFINE(VMM_PT_REGS_R4_OFFSET,
-				offsetof(struct kvm_pt_regs, r4));
-	DEFINE(VMM_PT_REGS_R5_OFFSET,
-				offsetof(struct kvm_pt_regs, r5));
-	DEFINE(VMM_PT_REGS_R6_OFFSET,
-				offsetof(struct kvm_pt_regs, r6));
-	DEFINE(VMM_PT_REGS_R7_OFFSET,
-				offsetof(struct kvm_pt_regs, r7));
-	DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
-				offsetof(struct kvm_pt_regs, eml_unat));
-	DEFINE(VMM_VCPU_IIPA_OFFSET,
-				offsetof(struct kvm_vcpu, arch.cr_iipa));
-	DEFINE(VMM_VCPU_OPCODE_OFFSET,
-				offsetof(struct kvm_vcpu, arch.opcode));
-	DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
-	DEFINE(VMM_VCPU_ISR_OFFSET,
-				offsetof(struct kvm_vcpu, arch.cr_isr));
-	DEFINE(VMM_PT_REGS_R16_SLOT,
-				(((offsetof(struct kvm_pt_regs, r16)
-				- sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
-	DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
-				offsetof(struct kvm_vcpu, arch.mode_flags));
-	DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
-	BLANK();
-
-	DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
-	DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
-	DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
-			offsetof(struct kvm_vcpu, arch.insvc[0]));
-	DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
-	DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
-
-	DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
-	DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
-	DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
-	DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
-	DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
-	DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
-	DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
-	DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
-	DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
-	DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
-	DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
-	DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
-	DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
-	DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
-	DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
-	DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
-	DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
-	DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
-	DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
-	DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
-	DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
-	DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
-	DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
-	DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
-	DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
-	DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
-	DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
-	DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
-	DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
-	DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
-	DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
-	DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
-	DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
-	DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
-	BLANK();
-}
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
deleted file mode 100644
index c0785a7..0000000
--- a/arch/ia64/kvm/irq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * irq.h: In-kernel interrupt controller related definitions
- * Copyright (c) 2008, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- *   Xiantao Zhang <xiantao.zhang@intel.com>
- *
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include "lapic.h"
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
-	return 1;
-}
-
-#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
deleted file mode 100644
index dbe46f4..0000000
--- a/arch/ia64/kvm/kvm-ia64.c
+++ /dev/null
@@ -1,1942 +0,0 @@
-/*
- * kvm_ia64.c: Basic KVM support On Itanium series processors
- *
- *
- * 	Copyright (C) 2007, Intel Corporation.
- *  	Xiantao Zhang  (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/percpu.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/bitops.h>
-#include <linux/hrtimer.h>
-#include <linux/uaccess.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-#include <linux/pci.h>
-
-#include <asm/pgtable.h>
-#include <asm/gcc_intrin.h>
-#include <asm/pal.h>
-#include <asm/cacheflush.h>
-#include <asm/div64.h>
-#include <asm/tlb.h>
-#include <asm/elf.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-#include "misc.h"
-#include "vti.h"
-#include "iodev.h"
-#include "ioapic.h"
-#include "lapic.h"
-#include "irq.h"
-
-static unsigned long kvm_vmm_base;
-static unsigned long kvm_vsa_base;
-static unsigned long kvm_vm_buffer;
-static unsigned long kvm_vm_buffer_size;
-unsigned long kvm_vmm_gp;
-
-static long vp_env_info;
-
-static struct kvm_vmm_info *kvm_vmm_info;
-
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
-
-struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ NULL }
-};
-
-static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
-{
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
-	if (vcpu->kvm->arch.is_sn2)
-		return rtc_time();
-	else
-#endif
-		return ia64_getreg(_IA64_REG_AR_ITC);
-}
-
-static void kvm_flush_icache(unsigned long start, unsigned long len)
-{
-	int l;
-
-	for (l = 0; l < (len + 32); l += 32)
-		ia64_fc((void *)(start + l));
-
-	ia64_sync_i();
-	ia64_srlz_i();
-}
-
-static void kvm_flush_tlb_all(void)
-{
-	unsigned long i, j, count0, count1, stride0, stride1, addr;
-	long flags;
-
-	addr    = local_cpu_data->ptce_base;
-	count0  = local_cpu_data->ptce_count[0];
-	count1  = local_cpu_data->ptce_count[1];
-	stride0 = local_cpu_data->ptce_stride[0];
-	stride1 = local_cpu_data->ptce_stride[1];
-
-	local_irq_save(flags);
-	for (i = 0; i < count0; ++i) {
-		for (j = 0; j < count1; ++j) {
-			ia64_ptce(addr);
-			addr += stride1;
-		}
-		addr += stride0;
-	}
-	local_irq_restore(flags);
-	ia64_srlz_i();			/* srlz.i implies srlz.d */
-}
-
-long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
-{
-	struct ia64_pal_retval iprv;
-
-	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
-			(u64)opt_handler);
-
-	return iprv.status;
-}
-
-static  DEFINE_SPINLOCK(vp_lock);
-
-int kvm_arch_hardware_enable(void)
-{
-	long  status;
-	long  tmp_base;
-	unsigned long pte;
-	unsigned long saved_psr;
-	int slot;
-
-	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
-	local_irq_save(saved_psr);
-	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
-	local_irq_restore(saved_psr);
-	if (slot < 0)
-		return -EINVAL;
-
-	spin_lock(&vp_lock);
-	status = ia64_pal_vp_init_env(kvm_vsa_base ?
-				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
-			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
-	if (status != 0) {
-		spin_unlock(&vp_lock);
-		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
-		return -EINVAL;
-	}
-
-	if (!kvm_vsa_base) {
-		kvm_vsa_base = tmp_base;
-		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
-	}
-	spin_unlock(&vp_lock);
-	ia64_ptr_entry(0x3, slot);
-
-	return 0;
-}
-
-void kvm_arch_hardware_disable(void)
-{
-
-	long status;
-	int slot;
-	unsigned long pte;
-	unsigned long saved_psr;
-	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
-
-	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
-				PAGE_KERNEL));
-
-	local_irq_save(saved_psr);
-	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
-	local_irq_restore(saved_psr);
-	if (slot < 0)
-		return;
-
-	status = ia64_pal_vp_exit_env(host_iva);
-	if (status)
-		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
-				status);
-	ia64_ptr_entry(0x3, slot);
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
-	*(int *)rtn = 0;
-}
-
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-
-	int r;
-
-	switch (ext) {
-	case KVM_CAP_IRQCHIP:
-	case KVM_CAP_MP_STATE:
-	case KVM_CAP_IRQ_INJECT_STATUS:
-	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
-		r = 1;
-		break;
-	case KVM_CAP_COALESCED_MMIO:
-		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
-		break;
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-	case KVM_CAP_IOMMU:
-		r = iommu_present(&pci_bus_type);
-		break;
-#endif
-	default:
-		r = 0;
-	}
-	return r;
-
-}
-
-static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-	kvm_run->hw.hardware_exit_reason = 1;
-	return 0;
-}
-
-static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct kvm_mmio_req *p;
-	struct kvm_io_device *mmio_dev;
-	int r;
-
-	p = kvm_get_vcpu_ioreq(vcpu);
-
-	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
-		goto mmio;
-	vcpu->mmio_needed = 1;
-	vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
-	vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
-	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
-
-	if (vcpu->mmio_is_write)
-		memcpy(vcpu->arch.mmio_data, &p->data, p->size);
-	memcpy(kvm_run->mmio.data, &p->data, p->size);
-	kvm_run->exit_reason = KVM_EXIT_MMIO;
-	return 0;
-mmio:
-	if (p->dir)
-		r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
-				    p->size, &p->data);
-	else
-		r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
-				     p->size, &p->data);
-	if (r)
-		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
-	p->state = STATE_IORESP_READY;
-
-	return 1;
-}
-
-static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct exit_ctl_data *p;
-
-	p = kvm_get_exit_data(vcpu);
-
-	if (p->exit_reason == EXIT_REASON_PAL_CALL)
-		return kvm_pal_emul(vcpu, kvm_run);
-	else {
-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-		kvm_run->hw.hardware_exit_reason = 2;
-		return 0;
-	}
-}
-
-static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct exit_ctl_data *p;
-
-	p = kvm_get_exit_data(vcpu);
-
-	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
-		kvm_sal_emul(vcpu);
-		return 1;
-	} else {
-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-		kvm_run->hw.hardware_exit_reason = 3;
-		return 0;
-	}
-
-}
-
-static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
-{
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-	if (!test_and_set_bit(vector, &vpd->irr[0])) {
-		vcpu->arch.irq_new_pending = 1;
-		kvm_vcpu_kick(vcpu);
-		return 1;
-	}
-	return 0;
-}
-
-/*
- *  offset: address offset to IPI space.
- *  value:  deliver value.
- */
-static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
-				uint64_t vector)
-{
-	switch (dm) {
-	case SAPIC_FIXED:
-		break;
-	case SAPIC_NMI:
-		vector = 2;
-		break;
-	case SAPIC_EXTINT:
-		vector = 0;
-		break;
-	case SAPIC_INIT:
-	case SAPIC_PMI:
-	default:
-		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
-		return;
-	}
-	__apic_accept_irq(vcpu, vector);
-}
-
-static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
-			unsigned long eid)
-{
-	union ia64_lid lid;
-	int i;
-	struct kvm_vcpu *vcpu;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		lid.val = VCPU_LID(vcpu);
-		if (lid.id == id && lid.eid == eid)
-			return vcpu;
-	}
-
-	return NULL;
-}
-
-static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
-	struct kvm_vcpu *target_vcpu;
-	struct kvm_pt_regs *regs;
-	union ia64_ipi_a addr = p->u.ipi_data.addr;
-	union ia64_ipi_d data = p->u.ipi_data.data;
-
-	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
-	if (!target_vcpu)
-		return handle_vm_error(vcpu, kvm_run);
-
-	if (!target_vcpu->arch.launched) {
-		regs = vcpu_regs(target_vcpu);
-
-		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
-		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
-
-		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-		if (waitqueue_active(&target_vcpu->wq))
-			wake_up_interruptible(&target_vcpu->wq);
-	} else {
-		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
-		if (target_vcpu != vcpu)
-			kvm_vcpu_kick(target_vcpu);
-	}
-
-	return 1;
-}
-
-struct call_data {
-	struct kvm_ptc_g ptc_g_data;
-	struct kvm_vcpu *vcpu;
-};
-
-static void vcpu_global_purge(void *info)
-{
-	struct call_data *p = (struct call_data *)info;
-	struct kvm_vcpu *vcpu = p->vcpu;
-
-	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
-		return;
-
-	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
-	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
-		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
-							p->ptc_g_data;
-	} else {
-		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
-		vcpu->arch.ptc_g_count = 0;
-		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
-	}
-}
-
-static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
-	struct kvm *kvm = vcpu->kvm;
-	struct call_data call_data;
-	int i;
-	struct kvm_vcpu *vcpui;
-
-	call_data.ptc_g_data = p->u.ptc_g_data;
-
-	kvm_for_each_vcpu(i, vcpui, kvm) {
-		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
-				vcpu == vcpui)
-			continue;
-
-		if (waitqueue_active(&vcpui->wq))
-			wake_up_interruptible(&vcpui->wq);
-
-		if (vcpui->cpu != -1) {
-			call_data.vcpu = vcpui;
-			smp_call_function_single(vcpui->cpu,
-					vcpu_global_purge, &call_data, 1);
-		} else
-			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
-
-	}
-	return 1;
-}
-
-static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	return 1;
-}
-
-static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
-{
-	unsigned long pte, rtc_phys_addr, map_addr;
-	int slot;
-
-	map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
-	rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
-	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
-	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
-	vcpu->arch.sn_rtc_tr_slot = slot;
-	if (slot < 0) {
-		printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
-		slot = 0;
-	}
-	return slot;
-}
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
-
-	ktime_t kt;
-	long itc_diff;
-	unsigned long vcpu_now_itc;
-	unsigned long expires;
-	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
-	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-	if (irqchip_in_kernel(vcpu->kvm)) {
-
-		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
-
-		if (time_after(vcpu_now_itc, vpd->itm)) {
-			vcpu->arch.timer_check = 1;
-			return 1;
-		}
-		itc_diff = vpd->itm - vcpu_now_itc;
-		if (itc_diff < 0)
-			itc_diff = -itc_diff;
-
-		expires = div64_u64(itc_diff, cyc_per_usec);
-		kt = ktime_set(0, 1000 * expires);
-
-		vcpu->arch.ht_active = 1;
-		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
-
-		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
-		kvm_vcpu_block(vcpu);
-		hrtimer_cancel(p_ht);
-		vcpu->arch.ht_active = 0;
-
-		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
-				kvm_cpu_has_pending_timer(vcpu))
-			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
-				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
-		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
-			return -EINTR;
-		return 1;
-	} else {
-		printk(KERN_ERR"kvm: Unsupported userspace halt!");
-		return 0;
-	}
-}
-
-static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
-		struct kvm_run *kvm_run)
-{
-	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
-	return 0;
-}
-
-static int handle_external_interrupt(struct kvm_vcpu *vcpu,
-		struct kvm_run *kvm_run)
-{
-	return 1;
-}
-
-static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
-				struct kvm_run *kvm_run)
-{
-	printk("VMM: %s", vcpu->arch.log_buf);
-	return 1;
-}
-
-static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
-		struct kvm_run *kvm_run) = {
-	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
-	[EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
-	[EXIT_REASON_PAL_CALL]              = handle_pal_call,
-	[EXIT_REASON_SAL_CALL]              = handle_sal_call,
-	[EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
-	[EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
-	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
-	[EXIT_REASON_IPI]		    = handle_ipi,
-	[EXIT_REASON_PTC_G]		    = handle_global_purge,
-	[EXIT_REASON_DEBUG]		    = handle_vcpu_debug,
-
-};
-
-static const int kvm_vti_max_exit_handlers =
-		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
-
-static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p_exit_data;
-
-	p_exit_data = kvm_get_exit_data(vcpu);
-	return p_exit_data->exit_reason;
-}
-
-/*
- * The guest has exited.  See if we can fix it or if we need userspace
- * assistance.
- */
-static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
-	u32 exit_reason = kvm_get_exit_reason(vcpu);
-	vcpu->arch.last_exit = exit_reason;
-
-	if (exit_reason < kvm_vti_max_exit_handlers
-			&& kvm_vti_exit_handlers[exit_reason])
-		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
-	else {
-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-		kvm_run->hw.hardware_exit_reason = exit_reason;
-	}
-	return 0;
-}
-
-static inline void vti_set_rr6(unsigned long rr6)
-{
-	ia64_set_rr(RR6, rr6);
-	ia64_srlz_i();
-}
-
-static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
-{
-	unsigned long pte;
-	struct kvm *kvm = vcpu->kvm;
-	int r;
-
-	/*Insert a pair of tr to map vmm*/
-	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
-	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
-	if (r < 0)
-		goto out;
-	vcpu->arch.vmm_tr_slot = r;
-	/*Insert a pairt of tr to map data of vm*/
-	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
-	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
-					pte, KVM_VM_DATA_SHIFT);
-	if (r < 0)
-		goto out;
-	vcpu->arch.vm_tr_slot = r;
-
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
-	if (kvm->arch.is_sn2) {
-		r = kvm_sn2_setup_mappings(vcpu);
-		if (r < 0)
-			goto out;
-	}
-#endif
-
-	r = 0;
-out:
-	return r;
-}
-
-static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = vcpu->kvm;
-	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
-	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
-	if (kvm->arch.is_sn2)
-		ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
-#endif
-}
-
-static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
-{
-	unsigned long psr;
-	int r;
-	int cpu = smp_processor_id();
-
-	if (vcpu->arch.last_run_cpu != cpu ||
-			per_cpu(last_vcpu, cpu) != vcpu) {
-		per_cpu(last_vcpu, cpu) = vcpu;
-		vcpu->arch.last_run_cpu = cpu;
-		kvm_flush_tlb_all();
-	}
-
-	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
-	vti_set_rr6(vcpu->arch.vmm_rr);
-	local_irq_save(psr);
-	r = kvm_insert_vmm_mapping(vcpu);
-	local_irq_restore(psr);
-	return r;
-}
-
-static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
-{
-	kvm_purge_vmm_mapping(vcpu);
-	vti_set_rr6(vcpu->arch.host_rr6);
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	union context *host_ctx, *guest_ctx;
-	int r, idx;
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-again:
-	if (signal_pending(current)) {
-		r = -EINTR;
-		kvm_run->exit_reason = KVM_EXIT_INTR;
-		goto out;
-	}
-
-	preempt_disable();
-	local_irq_disable();
-
-	/*Get host and guest context with guest address space.*/
-	host_ctx = kvm_get_host_context(vcpu);
-	guest_ctx = kvm_get_guest_context(vcpu);
-
-	clear_bit(KVM_REQ_KICK, &vcpu->requests);
-
-	r = kvm_vcpu_pre_transition(vcpu);
-	if (r < 0)
-		goto vcpu_run_fail;
-
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-	vcpu->mode = IN_GUEST_MODE;
-	kvm_guest_enter();
-
-	/*
-	 * Transition to the guest
-	 */
-	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-
-	kvm_vcpu_post_transition(vcpu);
-
-	vcpu->arch.launched = 1;
-	set_bit(KVM_REQ_KICK, &vcpu->requests);
-	local_irq_enable();
-
-	/*
-	 * We must have an instruction between local_irq_enable() and
-	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
-	 * the interrupt shadow.  The stat.exits increment will do nicely.
-	 * But we need to prevent reordering, hence this barrier():
-	 */
-	barrier();
-	kvm_guest_exit();
-	vcpu->mode = OUTSIDE_GUEST_MODE;
-	preempt_enable();
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-	r = kvm_handle_exit(kvm_run, vcpu);
-
-	if (r > 0) {
-		if (!need_resched())
-			goto again;
-	}
-
-out:
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-	if (r > 0) {
-		cond_resched();
-		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		goto again;
-	}
-
-	return r;
-
-vcpu_run_fail:
-	local_irq_enable();
-	preempt_enable();
-	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-	goto out;
-}
-
-static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
-{
-	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
-
-	if (!vcpu->mmio_is_write)
-		memcpy(&p->data, vcpu->arch.mmio_data, 8);
-	p->state = STATE_IORESP_READY;
-}
-
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-	int r;
-	sigset_t sigsaved;
-
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
-	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
-		kvm_vcpu_block(vcpu);
-		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		r = -EAGAIN;
-		goto out;
-	}
-
-	if (vcpu->mmio_needed) {
-		memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
-		kvm_set_mmio_data(vcpu);
-		vcpu->mmio_read_completed = 1;
-		vcpu->mmio_needed = 0;
-	}
-	r = __vcpu_run(vcpu, kvm_run);
-out:
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-
-	return r;
-}
-
-struct kvm *kvm_arch_alloc_vm(void)
-{
-
-	struct kvm *kvm;
-	uint64_t  vm_base;
-
-	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
-
-	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
-
-	if (!vm_base)
-		return NULL;
-
-	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-	kvm = (struct kvm *)(vm_base +
-			offsetof(struct kvm_vm_data, kvm_vm_struct));
-	kvm->arch.vm_base = vm_base;
-	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
-
-	return kvm;
-}
-
-struct kvm_ia64_io_range {
-	unsigned long start;
-	unsigned long size;
-	unsigned long type;
-};
-
-static const struct kvm_ia64_io_range io_ranges[] = {
-	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
-	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
-	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
-	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
-	{PIB_START, PIB_SIZE, GPFN_PIB},
-};
-
-static void kvm_build_io_pmt(struct kvm *kvm)
-{
-	unsigned long i, j;
-
-	/* Mark I/O ranges */
-	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
-							i++) {
-		for (j = io_ranges[i].start;
-				j < io_ranges[i].start + io_ranges[i].size;
-				j += PAGE_SIZE)
-			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
-					io_ranges[i].type, 0);
-	}
-
-}
-
-/*Use unused rids to virtualize guest rid.*/
-#define GUEST_PHYSICAL_RR0	0x1739
-#define GUEST_PHYSICAL_RR4	0x2739
-#define VMM_INIT_RR		0x1660
-
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
-	BUG_ON(!kvm);
-
-	if (type)
-		return -EINVAL;
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
-	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
-	kvm->arch.vmm_init_rr = VMM_INIT_RR;
-
-	/*
-	 *Fill P2M entries for MMIO/IO ranges
-	 */
-	kvm_build_io_pmt(kvm);
-
-	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
-
-	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
-	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-
-	return 0;
-}
-
-static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
-					struct kvm_irqchip *chip)
-{
-	int r;
-
-	r = 0;
-	switch (chip->chip_id) {
-	case KVM_IRQCHIP_IOAPIC:
-		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
-		break;
-	default:
-		r = -EINVAL;
-		break;
-	}
-	return r;
-}
-
-static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
-{
-	int r;
-
-	r = 0;
-	switch (chip->chip_id) {
-	case KVM_IRQCHIP_IOAPIC:
-		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
-		break;
-	default:
-		r = -EINVAL;
-		break;
-	}
-	return r;
-}
-
-#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
-
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-	int i;
-
-	for (i = 0; i < 16; i++) {
-		vpd->vgr[i] = regs->vpd.vgr[i];
-		vpd->vbgr[i] = regs->vpd.vbgr[i];
-	}
-	for (i = 0; i < 128; i++)
-		vpd->vcr[i] = regs->vpd.vcr[i];
-	vpd->vhpi = regs->vpd.vhpi;
-	vpd->vnat = regs->vpd.vnat;
-	vpd->vbnat = regs->vpd.vbnat;
-	vpd->vpsr = regs->vpd.vpsr;
-
-	vpd->vpr = regs->vpd.vpr;
-
-	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
-
-	RESTORE_REGS(mp_state);
-	RESTORE_REGS(vmm_rr);
-	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
-	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
-	RESTORE_REGS(itr_regions);
-	RESTORE_REGS(dtr_regions);
-	RESTORE_REGS(tc_regions);
-	RESTORE_REGS(irq_check);
-	RESTORE_REGS(itc_check);
-	RESTORE_REGS(timer_check);
-	RESTORE_REGS(timer_pending);
-	RESTORE_REGS(last_itc);
-	for (i = 0; i < 8; i++) {
-		vcpu->arch.vrr[i] = regs->vrr[i];
-		vcpu->arch.ibr[i] = regs->ibr[i];
-		vcpu->arch.dbr[i] = regs->dbr[i];
-	}
-	for (i = 0; i < 4; i++)
-		vcpu->arch.insvc[i] = regs->insvc[i];
-	RESTORE_REGS(xtp);
-	RESTORE_REGS(metaphysical_rr0);
-	RESTORE_REGS(metaphysical_rr4);
-	RESTORE_REGS(metaphysical_saved_rr0);
-	RESTORE_REGS(metaphysical_saved_rr4);
-	RESTORE_REGS(fp_psr);
-	RESTORE_REGS(saved_gp);
-
-	vcpu->arch.irq_new_pending = 1;
-	vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
-	set_bit(KVM_REQ_RESUME, &vcpu->requests);
-
-	return 0;
-}
-
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
-		bool line_status)
-{
-	if (!irqchip_in_kernel(kvm))
-		return -ENXIO;
-
-	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-					irq_event->irq, irq_event->level,
-					line_status);
-	return 0;
-}
-
-long kvm_arch_vm_ioctl(struct file *filp,
-		unsigned int ioctl, unsigned long arg)
-{
-	struct kvm *kvm = filp->private_data;
-	void __user *argp = (void __user *)arg;
-	int r = -ENOTTY;
-
-	switch (ioctl) {
-	case KVM_CREATE_IRQCHIP:
-		r = -EFAULT;
-		r = kvm_ioapic_init(kvm);
-		if (r)
-			goto out;
-		r = kvm_setup_default_irq_routing(kvm);
-		if (r) {
-			mutex_lock(&kvm->slots_lock);
-			kvm_ioapic_destroy(kvm);
-			mutex_unlock(&kvm->slots_lock);
-			goto out;
-		}
-		break;
-	case KVM_GET_IRQCHIP: {
-		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-		struct kvm_irqchip chip;
-
-		r = -EFAULT;
-		if (copy_from_user(&chip, argp, sizeof chip))
-				goto out;
-		r = -ENXIO;
-		if (!irqchip_in_kernel(kvm))
-			goto out;
-		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
-		if (r)
-			goto out;
-		r = -EFAULT;
-		if (copy_to_user(argp, &chip, sizeof chip))
-				goto out;
-		r = 0;
-		break;
-		}
-	case KVM_SET_IRQCHIP: {
-		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
-		struct kvm_irqchip chip;
-
-		r = -EFAULT;
-		if (copy_from_user(&chip, argp, sizeof chip))
-				goto out;
-		r = -ENXIO;
-		if (!irqchip_in_kernel(kvm))
-			goto out;
-		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
-		if (r)
-			goto out;
-		r = 0;
-		break;
-		}
-	default:
-		;
-	}
-out:
-	return r;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-		struct kvm_sregs *sregs)
-{
-	return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-		struct kvm_sregs *sregs)
-{
-	return -EINVAL;
-
-}
-int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
-		struct kvm_translation *tr)
-{
-
-	return -EINVAL;
-}
-
-static int kvm_alloc_vmm_area(void)
-{
-	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
-		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
-				get_order(KVM_VMM_SIZE));
-		if (!kvm_vmm_base)
-			return -ENOMEM;
-
-		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
-		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
-
-		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
-				kvm_vmm_base, kvm_vm_buffer);
-	}
-
-	return 0;
-}
-
-static void kvm_free_vmm_area(void)
-{
-	if (kvm_vmm_base) {
-		/*Zero this area before free to avoid bits leak!!*/
-		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
-		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
-		kvm_vmm_base  = 0;
-		kvm_vm_buffer = 0;
-		kvm_vsa_base = 0;
-	}
-}
-
-static int vti_init_vpd(struct kvm_vcpu *vcpu)
-{
-	int i;
-	union cpuid3_t cpuid3;
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-	if (IS_ERR(vpd))
-		return PTR_ERR(vpd);
-
-	/* CPUID init */
-	for (i = 0; i < 5; i++)
-		vpd->vcpuid[i] = ia64_get_cpuid(i);
-
-	/* Limit the CPUID number to 5 */
-	cpuid3.value = vpd->vcpuid[3];
-	cpuid3.number = 4;	/* 5 - 1 */
-	vpd->vcpuid[3] = cpuid3.value;
-
-	/*Set vac and vdc fields*/
-	vpd->vac.a_from_int_cr = 1;
-	vpd->vac.a_to_int_cr = 1;
-	vpd->vac.a_from_psr = 1;
-	vpd->vac.a_from_cpuid = 1;
-	vpd->vac.a_cover = 1;
-	vpd->vac.a_bsw = 1;
-	vpd->vac.a_int = 1;
-	vpd->vdc.d_vmsw = 1;
-
-	/*Set virtual buffer*/
-	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
-
-	return 0;
-}
-
-static int vti_create_vp(struct kvm_vcpu *vcpu)
-{
-	long ret;
-	struct vpd *vpd = vcpu->arch.vpd;
-	unsigned long  vmm_ivt;
-
-	vmm_ivt = kvm_vmm_info->vmm_ivt;
-
-	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
-
-	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
-
-	if (ret) {
-		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void init_ptce_info(struct kvm_vcpu *vcpu)
-{
-	ia64_ptce_info_t ptce = {0};
-
-	ia64_get_ptce(&ptce);
-	vcpu->arch.ptce_base = ptce.base;
-	vcpu->arch.ptce_count[0] = ptce.count[0];
-	vcpu->arch.ptce_count[1] = ptce.count[1];
-	vcpu->arch.ptce_stride[0] = ptce.stride[0];
-	vcpu->arch.ptce_stride[1] = ptce.stride[1];
-}
-
-static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
-{
-	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
-
-	if (hrtimer_cancel(p_ht))
-		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
-}
-
-static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
-{
-	struct kvm_vcpu *vcpu;
-	wait_queue_head_t *q;
-
-	vcpu  = container_of(data, struct kvm_vcpu, arch.hlt_timer);
-	q = &vcpu->wq;
-
-	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
-		goto out;
-
-	if (waitqueue_active(q))
-		wake_up_interruptible(q);
-
-out:
-	vcpu->arch.timer_fired = 1;
-	vcpu->arch.timer_check = 1;
-	return HRTIMER_NORESTART;
-}
-
-#define PALE_RESET_ENTRY    0x80000000ffffffb0UL
-
-bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
-{
-	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	struct kvm_vcpu *v;
-	int r;
-	int i;
-	long itc_offset;
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	union context *p_ctx = &vcpu->arch.guest;
-	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
-
-	/*Init vcpu context for first run.*/
-	if (IS_ERR(vmm_vcpu))
-		return PTR_ERR(vmm_vcpu);
-
-	if (kvm_vcpu_is_bsp(vcpu)) {
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
-		/*Set entry address for first run.*/
-		regs->cr_iip = PALE_RESET_ENTRY;
-
-		/*Initialize itc offset for vcpus*/
-		itc_offset = 0UL - kvm_get_itc(vcpu);
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
-			v = (struct kvm_vcpu *)((char *)vcpu +
-					sizeof(struct kvm_vcpu_data) * i);
-			v->arch.itc_offset = itc_offset;
-			v->arch.last_itc = 0;
-		}
-	} else
-		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
-
-	r = -ENOMEM;
-	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
-	if (!vcpu->arch.apic)
-		goto out;
-	vcpu->arch.apic->vcpu = vcpu;
-
-	p_ctx->gr[1] = 0;
-	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
-	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
-	p_ctx->psr = 0x1008522000UL;
-	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
-	p_ctx->caller_unat = 0;
-	p_ctx->pr = 0x0;
-	p_ctx->ar[36] = 0x0; /*unat*/
-	p_ctx->ar[19] = 0x0; /*rnat*/
-	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
-				((sizeof(struct kvm_vcpu)+15) & ~15);
-	p_ctx->ar[64] = 0x0; /*pfs*/
-	p_ctx->cr[0] = 0x7e04UL;
-	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
-	p_ctx->cr[8] = 0x3c;
-
-	/*Initialize region register*/
-	p_ctx->rr[0] = 0x30;
-	p_ctx->rr[1] = 0x30;
-	p_ctx->rr[2] = 0x30;
-	p_ctx->rr[3] = 0x30;
-	p_ctx->rr[4] = 0x30;
-	p_ctx->rr[5] = 0x30;
-	p_ctx->rr[7] = 0x30;
-
-	/*Initialize branch register 0*/
-	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
-
-	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
-	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
-	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
-
-	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	vcpu->arch.hlt_timer.function = hlt_timer_fn;
-
-	vcpu->arch.last_run_cpu = -1;
-	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
-	vcpu->arch.vsa_base = kvm_vsa_base;
-	vcpu->arch.__gp = kvm_vmm_gp;
-	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
-	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
-	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
-	init_ptce_info(vcpu);
-
-	r = 0;
-out:
-	return r;
-}
-
-static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
-{
-	unsigned long psr;
-	int r;
-
-	local_irq_save(psr);
-	r = kvm_insert_vmm_mapping(vcpu);
-	local_irq_restore(psr);
-	if (r)
-		goto fail;
-	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
-	if (r)
-		goto fail;
-
-	r = vti_init_vpd(vcpu);
-	if (r) {
-		printk(KERN_DEBUG"kvm: vpd init error!!\n");
-		goto uninit;
-	}
-
-	r = vti_create_vp(vcpu);
-	if (r)
-		goto uninit;
-
-	kvm_purge_vmm_mapping(vcpu);
-
-	return 0;
-uninit:
-	kvm_vcpu_uninit(vcpu);
-fail:
-	return r;
-}
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-		unsigned int id)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long vm_base = kvm->arch.vm_base;
-	int r;
-	int cpu;
-
-	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
-
-	r = -EINVAL;
-	if (id >= KVM_MAX_VCPUS) {
-		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
-				KVM_MAX_VCPUS);
-		goto fail;
-	}
-
-	r = -ENOMEM;
-	if (!vm_base) {
-		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
-		goto fail;
-	}
-	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
-					vcpu_data[id].vcpu_struct));
-	vcpu->kvm = kvm;
-
-	cpu = get_cpu();
-	r = vti_vcpu_setup(vcpu, id);
-	put_cpu();
-
-	if (r) {
-		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
-		goto fail;
-	}
-
-	return vcpu;
-fail:
-	return ERR_PTR(r);
-}
-
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-	return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-	return -EINVAL;
-}
-
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-					struct kvm_guest_debug *dbg)
-{
-	return -EINVAL;
-}
-
-void kvm_arch_free_vm(struct kvm *kvm)
-{
-	unsigned long vm_base = kvm->arch.vm_base;
-
-	if (vm_base) {
-		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
-	}
-
-}
-
-static void kvm_release_vm_pages(struct kvm *kvm)
-{
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int j;
-
-	slots = kvm_memslots(kvm);
-	kvm_for_each_memslot(memslot, slots) {
-		for (j = 0; j < memslot->npages; j++) {
-			if (memslot->rmap[j])
-				put_page((struct page *)memslot->rmap[j]);
-		}
-	}
-}
-
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-	kvm_iommu_unmap_guest(kvm);
-	kvm_free_all_assigned_devices(kvm);
-	kfree(kvm->arch.vioapic);
-	kvm_release_vm_pages(kvm);
-}
-
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-	if (cpu != vcpu->cpu) {
-		vcpu->cpu = cpu;
-		if (vcpu->arch.ht_active)
-			kvm_migrate_hlt_timer(vcpu);
-	}
-}
-
-#define SAVE_REGS(_x) 	regs->_x = vcpu->arch._x
-
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-	int i;
-
-	vcpu_load(vcpu);
-
-	for (i = 0; i < 16; i++) {
-		regs->vpd.vgr[i] = vpd->vgr[i];
-		regs->vpd.vbgr[i] = vpd->vbgr[i];
-	}
-	for (i = 0; i < 128; i++)
-		regs->vpd.vcr[i] = vpd->vcr[i];
-	regs->vpd.vhpi = vpd->vhpi;
-	regs->vpd.vnat = vpd->vnat;
-	regs->vpd.vbnat = vpd->vbnat;
-	regs->vpd.vpsr = vpd->vpsr;
-	regs->vpd.vpr = vpd->vpr;
-
-	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
-
-	SAVE_REGS(mp_state);
-	SAVE_REGS(vmm_rr);
-	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
-	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
-	SAVE_REGS(itr_regions);
-	SAVE_REGS(dtr_regions);
-	SAVE_REGS(tc_regions);
-	SAVE_REGS(irq_check);
-	SAVE_REGS(itc_check);
-	SAVE_REGS(timer_check);
-	SAVE_REGS(timer_pending);
-	SAVE_REGS(last_itc);
-	for (i = 0; i < 8; i++) {
-		regs->vrr[i] = vcpu->arch.vrr[i];
-		regs->ibr[i] = vcpu->arch.ibr[i];
-		regs->dbr[i] = vcpu->arch.dbr[i];
-	}
-	for (i = 0; i < 4; i++)
-		regs->insvc[i] = vcpu->arch.insvc[i];
-	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
-	SAVE_REGS(xtp);
-	SAVE_REGS(metaphysical_rr0);
-	SAVE_REGS(metaphysical_rr4);
-	SAVE_REGS(metaphysical_saved_rr0);
-	SAVE_REGS(metaphysical_saved_rr4);
-	SAVE_REGS(fp_psr);
-	SAVE_REGS(saved_gp);
-
-	vcpu_put(vcpu);
-	return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
-				  struct kvm_ia64_vcpu_stack *stack)
-{
-	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
-	return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
-				  struct kvm_ia64_vcpu_stack *stack)
-{
-	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
-	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
-
-	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
-	return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-
-	hrtimer_cancel(&vcpu->arch.hlt_timer);
-	kfree(vcpu->arch.apic);
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp,
-			 unsigned int ioctl, unsigned long arg)
-{
-	struct kvm_vcpu *vcpu = filp->private_data;
-	void __user *argp = (void __user *)arg;
-	struct kvm_ia64_vcpu_stack *stack = NULL;
-	long r;
-
-	switch (ioctl) {
-	case KVM_IA64_VCPU_GET_STACK: {
-		struct kvm_ia64_vcpu_stack __user *user_stack;
-	        void __user *first_p = argp;
-
-		r = -EFAULT;
-		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
-			goto out;
-
-		if (!access_ok(VERIFY_WRITE, user_stack,
-			       sizeof(struct kvm_ia64_vcpu_stack))) {
-			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
-			       "Illegal user destination address for stack\n");
-			goto out;
-		}
-		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
-		if (!stack) {
-			r = -ENOMEM;
-			goto out;
-		}
-
-		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
-		if (r)
-			goto out;
-
-		if (copy_to_user(user_stack, stack,
-				 sizeof(struct kvm_ia64_vcpu_stack))) {
-			r = -EFAULT;
-			goto out;
-		}
-
-		break;
-	}
-	case KVM_IA64_VCPU_SET_STACK: {
-		struct kvm_ia64_vcpu_stack __user *user_stack;
-	        void __user *first_p = argp;
-
-		r = -EFAULT;
-		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
-			goto out;
-
-		if (!access_ok(VERIFY_READ, user_stack,
-			    sizeof(struct kvm_ia64_vcpu_stack))) {
-			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
-			       "Illegal user address for stack\n");
-			goto out;
-		}
-		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
-		if (!stack) {
-			r = -ENOMEM;
-			goto out;
-		}
-		if (copy_from_user(stack, user_stack,
-				   sizeof(struct kvm_ia64_vcpu_stack)))
-			goto out;
-
-		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
-		break;
-	}
-
-	default:
-		r = -EINVAL;
-	}
-
-out:
-	kfree(stack);
-	return r;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
-	return VM_FAULT_SIGBUS;
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
-		struct kvm_memory_slot *memslot,
-		struct kvm_userspace_memory_region *mem,
-		enum kvm_mr_change change)
-{
-	unsigned long i;
-	unsigned long pfn;
-	int npages = memslot->npages;
-	unsigned long base_gfn = memslot->base_gfn;
-
-	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
-		return -ENOMEM;
-
-	for (i = 0; i < npages; i++) {
-		pfn = gfn_to_pfn(kvm, base_gfn + i);
-		if (!kvm_is_reserved_pfn(pfn)) {
-			kvm_set_pmt_entry(kvm, base_gfn + i,
-					pfn << PAGE_SHIFT,
-				_PAGE_AR_RWX | _PAGE_MA_WB);
-			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
-		} else {
-			kvm_set_pmt_entry(kvm, base_gfn + i,
-					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
-					_PAGE_MA_UC);
-			memslot->rmap[i] = 0;
-			}
-	}
-
-	return 0;
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-	kvm_flush_remote_tlbs(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-				   struct kvm_memory_slot *slot)
-{
-	kvm_arch_flush_shadow_all();
-}
-
-long kvm_arch_dev_ioctl(struct file *filp,
-			unsigned int ioctl, unsigned long arg)
-{
-	return -EINVAL;
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_vcpu_uninit(vcpu);
-}
-
-static int vti_cpu_has_kvm_support(void)
-{
-	long  avail = 1, status = 1, control = 1;
-	long ret;
-
-	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
-	if (ret)
-		goto out;
-
-	if (!(avail & PAL_PROC_VM_BIT))
-		goto out;
-
-	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
-
-	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
-	if (ret)
-		goto out;
-	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
-
-	if (!(vp_env_info & VP_OPCODE)) {
-		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
-				"vm_env_info:0x%lx\n", vp_env_info);
-	}
-
-	return 1;
-out:
-	return 0;
-}
-
-
-/*
- * On SN2, the ITC isn't stable, so copy in fast path code to use the
- * SN2 RTC, replacing the ITC based default verion.
- */
-static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
-			  struct module *module)
-{
-	unsigned long new_ar, new_ar_sn2;
-	unsigned long module_base;
-
-	if (!ia64_platform_is("sn2"))
-		return;
-
-	module_base = (unsigned long)module->module_core;
-
-	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
-	new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
-
-	printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
-	       "as source\n");
-
-	/*
-	 * Copy the SN2 version of mov_ar into place. They are both
-	 * the same size, so 6 bundles is sufficient (6 * 0x10).
-	 */
-	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
-}
-
-static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
-			    struct module *module)
-{
-	unsigned long module_base;
-	unsigned long vmm_size;
-
-	unsigned long vmm_offset, func_offset, fdesc_offset;
-	struct fdesc *p_fdesc;
-
-	BUG_ON(!module);
-
-	if (!kvm_vmm_base) {
-		printk("kvm: kvm area hasn't been initialized yet!!\n");
-		return -EFAULT;
-	}
-
-	/*Calculate new position of relocated vmm module.*/
-	module_base = (unsigned long)module->module_core;
-	vmm_size = module->core_size;
-	if (unlikely(vmm_size > KVM_VMM_SIZE))
-		return -EFAULT;
-
-	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
-	kvm_patch_vmm(vmm_info, module);
-	kvm_flush_icache(kvm_vmm_base, vmm_size);
-
-	/*Recalculate kvm_vmm_info based on new VMM*/
-	vmm_offset = vmm_info->vmm_ivt - module_base;
-	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
-	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
-			kvm_vmm_info->vmm_ivt);
-
-	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
-	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
-							fdesc_offset);
-	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
-	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
-	p_fdesc->ip = KVM_VMM_BASE + func_offset;
-	p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
-
-	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
-			KVM_VMM_BASE+func_offset);
-
-	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
-	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
-			fdesc_offset);
-	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
-	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
-	p_fdesc->ip = KVM_VMM_BASE + func_offset;
-	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
-
-	kvm_vmm_gp = p_fdesc->gp;
-
-	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
-						kvm_vmm_info->vmm_entry);
-	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
-						KVM_VMM_BASE + func_offset);
-
-	return 0;
-}
-
-int kvm_arch_init(void *opaque)
-{
-	int r;
-	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
-
-	if (!vti_cpu_has_kvm_support()) {
-		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
-		r = -EOPNOTSUPP;
-		goto out;
-	}
-
-	if (kvm_vmm_info) {
-		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
-		r = -EEXIST;
-		goto out;
-	}
-
-	r = -ENOMEM;
-	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
-	if (!kvm_vmm_info)
-		goto out;
-
-	if (kvm_alloc_vmm_area())
-		goto out_free0;
-
-	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
-	if (r)
-		goto out_free1;
-
-	return 0;
-
-out_free1:
-	kvm_free_vmm_area();
-out_free0:
-	kfree(kvm_vmm_info);
-out:
-	return r;
-}
-
-void kvm_arch_exit(void)
-{
-	kvm_free_vmm_area();
-	kfree(kvm_vmm_info);
-	kvm_vmm_info = NULL;
-}
-
-static void kvm_ia64_sync_dirty_log(struct kvm *kvm,
-				    struct kvm_memory_slot *memslot)
-{
-	int i;
-	long base;
-	unsigned long n;
-	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
-			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
-
-	n = kvm_dirty_bitmap_bytes(memslot);
-	base = memslot->base_gfn / BITS_PER_LONG;
-
-	spin_lock(&kvm->arch.dirty_log_lock);
-	for (i = 0; i < n/sizeof(long); ++i) {
-		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
-		dirty_bitmap[base + i] = 0;
-	}
-	spin_unlock(&kvm->arch.dirty_log_lock);
-}
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
-		struct kvm_dirty_log *log)
-{
-	int r;
-	unsigned long n;
-	struct kvm_memory_slot *memslot;
-	int is_dirty = 0;
-
-	mutex_lock(&kvm->slots_lock);
-
-	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
-		goto out;
-
-	memslot = id_to_memslot(kvm->memslots, log->slot);
-	r = -ENOENT;
-	if (!memslot->dirty_bitmap)
-		goto out;
-
-	kvm_ia64_sync_dirty_log(kvm, memslot);
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
-		goto out;
-
-	/* If nothing is dirty, don't bother messing with page tables. */
-	if (is_dirty) {
-		kvm_flush_remote_tlbs(kvm);
-		n = kvm_dirty_bitmap_bytes(memslot);
-		memset(memslot->dirty_bitmap, 0, n);
-	}
-	r = 0;
-out:
-	mutex_unlock(&kvm->slots_lock);
-	return r;
-}
-
-int kvm_arch_hardware_setup(void)
-{
-	return 0;
-}
-
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
-{
-	return __apic_accept_irq(vcpu, irq->vector);
-}
-
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
-{
-	return apic->vcpu->vcpu_id == dest;
-}
-
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
-{
-	return 0;
-}
-
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
-{
-	return vcpu1->arch.xtp - vcpu2->arch.xtp;
-}
-
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-		int short_hand, int dest, int dest_mode)
-{
-	struct kvm_lapic *target = vcpu->arch.apic;
-	return (dest_mode == 0) ?
-		kvm_apic_match_physical_addr(target, dest) :
-		kvm_apic_match_logical_addr(target, dest);
-}
-
-static int find_highest_bits(int *dat)
-{
-	u32  bits, bitnum;
-	int i;
-
-	/* loop for all 256 bits */
-	for (i = 7; i >= 0 ; i--) {
-		bits = dat[i];
-		if (bits) {
-			bitnum = fls(bits);
-			return i * 32 + bitnum - 1;
-		}
-	}
-
-	return -1;
-}
-
-int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
-{
-    struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-    if (vpd->irr[0] & (1UL << NMI_VECTOR))
-		return NMI_VECTOR;
-    if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
-		return ExtINT_VECTOR;
-
-    return find_highest_bits((int *)&vpd->irr[0]);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.timer_fired;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
-		(kvm_highest_pending_irq(vcpu) != -1);
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
-	return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
-}
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-				    struct kvm_mp_state *mp_state)
-{
-	mp_state->mp_state = vcpu->arch.mp_state;
-	return 0;
-}
-
-static int vcpu_reset(struct kvm_vcpu *vcpu)
-{
-	int r;
-	long psr;
-	local_irq_save(psr);
-	r = kvm_insert_vmm_mapping(vcpu);
-	local_irq_restore(psr);
-	if (r)
-		goto fail;
-
-	vcpu->arch.launched = 0;
-	kvm_arch_vcpu_uninit(vcpu);
-	r = kvm_arch_vcpu_init(vcpu);
-	if (r)
-		goto fail;
-
-	kvm_purge_vmm_mapping(vcpu);
-	r = 0;
-fail:
-	return r;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-				    struct kvm_mp_state *mp_state)
-{
-	int r = 0;
-
-	vcpu->arch.mp_state = mp_state->mp_state;
-	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
-		r = vcpu_reset(vcpu);
-	return r;
-}
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
deleted file mode 100644
index cb548ee..0000000
--- a/arch/ia64/kvm/kvm_fw.c
+++ /dev/null
@@ -1,674 +0,0 @@
-/*
- * PAL/SAL call delegation
- *
- * Copyright (c) 2004 Li Susie <susie.li@intel.com>
- * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
- * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/smp.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-#include "vti.h"
-#include "misc.h"
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/tlb.h>
-
-/*
- * Handy macros to make sure that the PAL return values start out
- * as something meaningful.
- */
-#define INIT_PAL_STATUS_UNIMPLEMENTED(x)		\
-	{						\
-		x.status = PAL_STATUS_UNIMPLEMENTED;	\
-		x.v0 = 0;				\
-		x.v1 = 0;				\
-		x.v2 = 0;				\
-	}
-
-#define INIT_PAL_STATUS_SUCCESS(x)			\
-	{						\
-		x.status = PAL_STATUS_SUCCESS;		\
-		x.v0 = 0;				\
-		x.v1 = 0;				\
-		x.v2 = 0;				\
-    }
-
-static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
-		u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
-	struct exit_ctl_data *p;
-
-	if (vcpu) {
-		p = &vcpu->arch.exit_data;
-		if (p->exit_reason == EXIT_REASON_PAL_CALL) {
-			*gr28 = p->u.pal_data.gr28;
-			*gr29 = p->u.pal_data.gr29;
-			*gr30 = p->u.pal_data.gr30;
-			*gr31 = p->u.pal_data.gr31;
-			return ;
-		}
-	}
-	printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n");
-}
-
-static void set_pal_result(struct kvm_vcpu *vcpu,
-		struct ia64_pal_retval result) {
-
-	struct exit_ctl_data *p;
-
-	p = kvm_get_exit_data(vcpu);
-	if (p->exit_reason == EXIT_REASON_PAL_CALL) {
-		p->u.pal_data.ret = result;
-		return ;
-	}
-	INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
-}
-
-static void set_sal_result(struct kvm_vcpu *vcpu,
-		struct sal_ret_values result) {
-	struct exit_ctl_data *p;
-
-	p = kvm_get_exit_data(vcpu);
-	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
-		p->u.sal_data.ret = result;
-		return ;
-	}
-	printk(KERN_WARNING"Failed to set sal result!!\n");
-}
-
-struct cache_flush_args {
-	u64 cache_type;
-	u64 operation;
-	u64 progress;
-	long status;
-};
-
-cpumask_t cpu_cache_coherent_map;
-
-static void remote_pal_cache_flush(void *data)
-{
-	struct cache_flush_args *args = data;
-	long status;
-	u64 progress = args->progress;
-
-	status = ia64_pal_cache_flush(args->cache_type, args->operation,
-					&progress, NULL);
-	if (status != 0)
-	args->status = status;
-}
-
-static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
-{
-	u64 gr28, gr29, gr30, gr31;
-	struct ia64_pal_retval result = {0, 0, 0, 0};
-	struct cache_flush_args args = {0, 0, 0, 0};
-	long psr;
-
-	gr28 = gr29 = gr30 = gr31 = 0;
-	kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
-
-	if (gr31 != 0)
-		printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);
-
-	/* Always call Host Pal in int=1 */
-	gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
-	args.cache_type = gr29;
-	args.operation = gr30;
-	smp_call_function(remote_pal_cache_flush,
-				(void *)&args, 1);
-	if (args.status != 0)
-		printk(KERN_ERR"pal_cache_flush error!,"
-				"status:0x%lx\n", args.status);
-	/*
-	 * Call Host PAL cache flush
-	 * Clear psr.ic when call PAL_CACHE_FLUSH
-	 */
-	local_irq_save(psr);
-	result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
-						&result.v0);
-	local_irq_restore(psr);
-	if (result.status != 0)
-		printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
-				"in1:%lx,in2:%lx\n",
-				vcpu, result.status, gr29, gr30);
-
-#if 0
-	if (gr29 == PAL_CACHE_TYPE_COHERENT) {
-		cpus_setall(vcpu->arch.cache_coherent_map);
-		cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
-		cpus_setall(cpu_cache_coherent_map);
-		cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
-	}
-#endif
-	return result;
-}
-
-struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
-{
-
-	struct ia64_pal_retval result;
-
-	PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
-	return result;
-}
-
-static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
-{
-
-	struct ia64_pal_retval result;
-
-	PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
-
-	/*
-	 * PAL_FREQ_BASE may not be implemented in some platforms,
-	 * call SAL instead.
-	 */
-	if (result.v0 == 0) {
-		result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
-							&result.v0,
-							&result.v1);
-		result.v2 = 0;
-	}
-
-	return result;
-}
-
-/*
- * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
- * RTC is used instead. This function patches the ratios from SAL
- * to match the RTC before providing them to the guest.
- */
-static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
-{
-	struct pal_freq_ratio *ratio;
-	unsigned long sal_freq, sal_drift, factor;
-
-	result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
-					    &sal_freq, &sal_drift);
-	ratio = (struct pal_freq_ratio *)&result->v2;
-	factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
-		sn_rtc_cycles_per_second;
-
-	ratio->num = 3;
-	ratio->den = factor;
-}
-
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
-{
-	struct ia64_pal_retval result;
-
-	PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
-
-	if (vcpu->kvm->arch.is_sn2)
-		sn2_patch_itc_freq_ratios(&result);
-
-	return result;
-}
-
-static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
-{
-	struct ia64_pal_retval result;
-
-	INIT_PAL_STATUS_UNIMPLEMENTED(result);
-	return result;
-}
-
-static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
-{
-
-	struct ia64_pal_retval result;
-
-	INIT_PAL_STATUS_SUCCESS(result);
-	return result;
-}
-
-static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
-{
-
-	struct ia64_pal_retval result = {0, 0, 0, 0};
-	long in0, in1, in2, in3;
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-	result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
-			&result.v2, in2);
-
-	return result;
-}
-
-static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
-{
-
-	struct ia64_pal_retval result = {0, 0, 0, 0};
-	long in0, in1, in2, in3;
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-	result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
-
-	return result;
-}
-
-static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
-{
-
-	pal_cache_config_info_t ci;
-	long status;
-	unsigned long in0, in1, in2, in3, r9, r10;
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-	status = ia64_pal_cache_config_info(in1, in2, &ci);
-	r9 = ci.pcci_info_1.pcci1_data;
-	r10 = ci.pcci_info_2.pcci2_data;
-	return ((struct ia64_pal_retval){status, r9, r10, 0});
-}
-
-#define GUEST_IMPL_VA_MSB	59
-#define GUEST_RID_BITS		18
-
-static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
-{
-
-	pal_vm_info_1_u_t vminfo1;
-	pal_vm_info_2_u_t vminfo2;
-	struct ia64_pal_retval result;
-
-	PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
-	if (!result.status) {
-		vminfo1.pvi1_val = result.v0;
-		vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
-		vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
-		result.v0 = vminfo1.pvi1_val;
-		vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
-		vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
-		result.v1 = vminfo2.pvi2_val;
-	}
-
-	return result;
-}
-
-static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
-{
-	struct ia64_pal_retval result;
-	unsigned long in0, in1, in2, in3;
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-
-	result.status = ia64_pal_vm_info(in1, in2,
-			(pal_tc_info_u_t *)&result.v1, &result.v2);
-
-	return result;
-}
-
-static  u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
-{
-	u64 index = 0;
-	struct exit_ctl_data *p;
-
-	p = kvm_get_exit_data(vcpu);
-	if (p->exit_reason == EXIT_REASON_PAL_CALL)
-		index = p->u.pal_data.gr28;
-
-	return index;
-}
-
-static void prepare_for_halt(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.timer_pending = 1;
-	vcpu->arch.timer_fired = 0;
-}
-
-static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
-{
-	long status;
-	unsigned long in0, in1, in2, in3, r9;
-	unsigned long pm_buffer[16];
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-	status = ia64_pal_perf_mon_info(pm_buffer,
-				(pal_perf_mon_info_u_t *) &r9);
-	if (status != 0) {
-		printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
-	} else {
-		if (in1)
-			memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
-		else {
-			status = PAL_STATUS_EINVAL;
-			printk(KERN_WARNING"Invalid parameters "
-						"for PAL call:0x%lx!\n", in0);
-		}
-	}
-	return (struct ia64_pal_retval){status, r9, 0, 0};
-}
-
-static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
-{
-	unsigned long in0, in1, in2, in3;
-	long status;
-	unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
-					| (1UL << 61) | (1UL << 60);
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-	if (in1) {
-		memcpy((void *)in1, &res, sizeof(res));
-		status = 0;
-	} else{
-		status = PAL_STATUS_EINVAL;
-		printk(KERN_WARNING"Invalid parameters "
-					"for PAL call:0x%lx!\n", in0);
-	}
-
-	return (struct ia64_pal_retval){status, 0, 0, 0};
-}
-
-static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
-{
-	unsigned long r9;
-	long status;
-
-	status = ia64_pal_mem_attrib(&r9);
-
-	return (struct ia64_pal_retval){status, r9, 0, 0};
-}
-
-static void remote_pal_prefetch_visibility(void *v)
-{
-	s64 trans_type = (s64)v;
-	ia64_pal_prefetch_visibility(trans_type);
-}
-
-static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
-{
-	struct ia64_pal_retval result = {0, 0, 0, 0};
-	unsigned long in0, in1, in2, in3;
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-	result.status = ia64_pal_prefetch_visibility(in1);
-	if (result.status == 0) {
-		/* Must be performed on all remote processors
-		in the coherence domain. */
-		smp_call_function(remote_pal_prefetch_visibility,
-					(void *)in1, 1);
-		/* Unnecessary on remote processor for other vcpus!*/
-		result.status = 1;
-	}
-	return result;
-}
-
-static void remote_pal_mc_drain(void *v)
-{
-	ia64_pal_mc_drain();
-}
-
-static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
-{
-	struct ia64_pal_retval result = {0, 0, 0, 0};
-	unsigned long in0, in1, in2, in3;
-
-	kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
-
-	if (in1 == 0 && in2) {
-		char brand_info[128];
-		result.status = ia64_pal_get_brand_info(brand_info);
-		if (result.status == PAL_STATUS_SUCCESS)
-			memcpy((void *)in2, brand_info, 128);
-	} else {
-		result.status = PAL_STATUS_REQUIRES_MEMORY;
-		printk(KERN_WARNING"Invalid parameters for "
-					"PAL call:0x%lx!\n", in0);
-	}
-
-	return result;
-}
-
-int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-
-	u64 gr28;
-	struct ia64_pal_retval result;
-	int ret = 1;
-
-	gr28 = kvm_get_pal_call_index(vcpu);
-	switch (gr28) {
-	case PAL_CACHE_FLUSH:
-		result = pal_cache_flush(vcpu);
-		break;
-	case PAL_MEM_ATTRIB:
-		result = pal_mem_attrib(vcpu);
-		break;
-	case PAL_CACHE_SUMMARY:
-		result = pal_cache_summary(vcpu);
-		break;
-	case PAL_PERF_MON_INFO:
-		result = pal_perf_mon_info(vcpu);
-		break;
-	case PAL_HALT_INFO:
-		result = pal_halt_info(vcpu);
-		break;
-	case PAL_HALT_LIGHT:
-	{
-		INIT_PAL_STATUS_SUCCESS(result);
-		prepare_for_halt(vcpu);
-		if (kvm_highest_pending_irq(vcpu) == -1)
-			ret = kvm_emulate_halt(vcpu);
-	}
-		break;
-
-	case PAL_PREFETCH_VISIBILITY:
-		result = pal_prefetch_visibility(vcpu);
-		break;
-	case PAL_MC_DRAIN:
-		result.status = ia64_pal_mc_drain();
-		/* FIXME: All vcpus likely call PAL_MC_DRAIN.
-		   That causes the congestion. */
-		smp_call_function(remote_pal_mc_drain, NULL, 1);
-		break;
-
-	case PAL_FREQ_RATIOS:
-		result = pal_freq_ratios(vcpu);
-		break;
-
-	case PAL_FREQ_BASE:
-		result = pal_freq_base(vcpu);
-		break;
-
-	case PAL_LOGICAL_TO_PHYSICAL :
-		result = pal_logical_to_physica(vcpu);
-		break;
-
-	case PAL_VM_SUMMARY :
-		result = pal_vm_summary(vcpu);
-		break;
-
-	case PAL_VM_INFO :
-		result = pal_vm_info(vcpu);
-		break;
-	case PAL_PLATFORM_ADDR :
-		result = pal_platform_addr(vcpu);
-		break;
-	case PAL_CACHE_INFO:
-		result = pal_cache_info(vcpu);
-		break;
-	case PAL_PTCE_INFO:
-		INIT_PAL_STATUS_SUCCESS(result);
-		result.v1 = (1L << 32) | 1L;
-		break;
-	case PAL_REGISTER_INFO:
-		result = pal_register_info(vcpu);
-		break;
-	case PAL_VM_PAGE_SIZE:
-		result.status = ia64_pal_vm_page_size(&result.v0,
-							&result.v1);
-		break;
-	case PAL_RSE_INFO:
-		result.status = ia64_pal_rse_info(&result.v0,
-					(pal_hints_u_t *)&result.v1);
-		break;
-	case PAL_PROC_GET_FEATURES:
-		result = pal_proc_get_features(vcpu);
-		break;
-	case PAL_DEBUG_INFO:
-		result.status = ia64_pal_debug_info(&result.v0,
-							&result.v1);
-		break;
-	case PAL_VERSION:
-		result.status = ia64_pal_version(
-				(pal_version_u_t *)&result.v0,
-				(pal_version_u_t *)&result.v1);
-		break;
-	case PAL_FIXED_ADDR:
-		result.status = PAL_STATUS_SUCCESS;
-		result.v0 = vcpu->vcpu_id;
-		break;
-	case PAL_BRAND_INFO:
-		result = pal_get_brand_info(vcpu);
-		break;
-	case PAL_GET_PSTATE:
-	case PAL_CACHE_SHARED_INFO:
-		INIT_PAL_STATUS_UNIMPLEMENTED(result);
-		break;
-	default:
-		INIT_PAL_STATUS_UNIMPLEMENTED(result);
-		printk(KERN_WARNING"kvm: Unsupported pal call,"
-					" index:0x%lx\n", gr28);
-	}
-	set_pal_result(vcpu, result);
-	return ret;
-}
-
-static struct sal_ret_values sal_emulator(struct kvm *kvm,
-				long index, unsigned long in1,
-				unsigned long in2, unsigned long in3,
-				unsigned long in4, unsigned long in5,
-				unsigned long in6, unsigned long in7)
-{
-	unsigned long r9  = 0;
-	unsigned long r10 = 0;
-	long r11 = 0;
-	long status;
-
-	status = 0;
-	switch (index) {
-	case SAL_FREQ_BASE:
-		status = ia64_sal_freq_base(in1, &r9, &r10);
-		break;
-	case SAL_PCI_CONFIG_READ:
-		printk(KERN_WARNING"kvm: Not allowed to call here!"
-			" SAL_PCI_CONFIG_READ\n");
-		break;
-	case SAL_PCI_CONFIG_WRITE:
-		printk(KERN_WARNING"kvm: Not allowed to call here!"
-			" SAL_PCI_CONFIG_WRITE\n");
-		break;
-	case SAL_SET_VECTORS:
-		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
-			if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
-				status = -2;
-			} else {
-				kvm->arch.rdv_sal_data.boot_ip = in2;
-				kvm->arch.rdv_sal_data.boot_gp = in3;
-			}
-			printk("Rendvous called! iip:%lx\n\n", in2);
-		} else
-			printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu."
-							"ignored...\n", in1);
-		break;
-	case SAL_GET_STATE_INFO:
-		/* No more info.  */
-		status = -5;
-		r9 = 0;
-		break;
-	case SAL_GET_STATE_INFO_SIZE:
-		/* Return a dummy size.  */
-		status = 0;
-		r9 = 128;
-		break;
-	case SAL_CLEAR_STATE_INFO:
-		/* Noop.  */
-		break;
-	case SAL_MC_RENDEZ:
-		printk(KERN_WARNING
-			"kvm: called SAL_MC_RENDEZ. ignored...\n");
-		break;
-	case SAL_MC_SET_PARAMS:
-		printk(KERN_WARNING
-			"kvm: called  SAL_MC_SET_PARAMS.ignored!\n");
-		break;
-	case SAL_CACHE_FLUSH:
-		if (1) {
-			/*Flush using SAL.
-			This method is faster but has a side
-			effect on other vcpu running on
-			this cpu.  */
-			status = ia64_sal_cache_flush(in1);
-		} else {
-			/*Maybe need to implement the method
-			without side effect!*/
-			status = 0;
-		}
-		break;
-	case SAL_CACHE_INIT:
-		printk(KERN_WARNING
-			"kvm: called SAL_CACHE_INIT.  ignored...\n");
-		break;
-	case SAL_UPDATE_PAL:
-		printk(KERN_WARNING
-			"kvm: CALLED SAL_UPDATE_PAL.  ignored...\n");
-		break;
-	default:
-		printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
-						" index:%ld\n", index);
-		status = -1;
-		break;
-	}
-	return ((struct sal_ret_values) {status, r9, r10, r11});
-}
-
-static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
-		u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){
-
-	struct exit_ctl_data *p;
-
-	p = kvm_get_exit_data(vcpu);
-
-	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
-		*in0 = p->u.sal_data.in0;
-		*in1 = p->u.sal_data.in1;
-		*in2 = p->u.sal_data.in2;
-		*in3 = p->u.sal_data.in3;
-		*in4 = p->u.sal_data.in4;
-		*in5 = p->u.sal_data.in5;
-		*in6 = p->u.sal_data.in6;
-		*in7 = p->u.sal_data.in7;
-		return ;
-	}
-	*in0 = 0;
-}
-
-void kvm_sal_emul(struct kvm_vcpu *vcpu)
-{
-
-	struct sal_ret_values result;
-	u64 index, in1, in2, in3, in4, in5, in6, in7;
-
-	kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
-			&in3, &in4, &in5, &in6, &in7);
-	result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
-					in4, in5, in6, in7);
-	set_sal_result(vcpu, result);
-}
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
deleted file mode 100644
index f1268b8..0000000
--- a/arch/ia64/kvm/kvm_lib.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * kvm_lib.c: Compile some libraries for kvm-intel module.
- *
- *	Just include kernel's library, and disable symbols export.
- * 	Copyright (C) 2008, Intel Corporation.
- *  	Xiantao Zhang  (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#undef CONFIG_MODULES
-#include <linux/module.h>
-#undef CONFIG_KALLSYMS
-#undef EXPORT_SYMBOL
-#undef EXPORT_SYMBOL_GPL
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#include "../../../lib/vsprintf.c"
-#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
deleted file mode 100644
index b2bcaa2..0000000
--- a/arch/ia64/kvm/kvm_minstate.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- *  kvm_minstate.h: min save macros
- *  Copyright (c) 2007, Intel Corporation.
- *
- *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- *  Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-
-#include <asm/asmmacro.h>
-#include <asm/types.h>
-#include <asm/kregs.h>
-#include <asm/kvm_host.h>
-
-#include "asm-offsets.h"
-
-#define KVM_MINSTATE_START_SAVE_MIN	     					\
-	mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
-	;;									\
-	mov.m r28 = ar.rnat;                                  			\
-	addl r22 = VMM_RBS_OFFSET,r1;            /* compute base of RBS */	\
-	;;									\
-	lfetch.fault.excl.nt1 [r22];						\
-	addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1;  \
-	mov r23 = ar.bspstore;			/* save ar.bspstore */          \
-	;;									\
-	mov ar.bspstore = r22;				/* switch to kernel RBS */\
-	;;									\
-	mov r18 = ar.bsp;							\
-	mov ar.rsc = 0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */
-
-
-
-#define KVM_MINSTATE_END_SAVE_MIN						\
-	bsw.1;          /* switch back to bank 1 (must be last in insn group) */\
-	;;
-
-
-#define PAL_VSA_SYNC_READ						\
-	/* begin to call pal vps sync_read */				\
-{.mii;									\
-	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
-	nop 0x0;							\
-	mov r24=ip;							\
-	;;								\
-}									\
-{.mmb									\
-	add r24=0x20, r24;						\
-	ld8 r25 = [r25];      /* read vpd base */			\
-	br.cond.sptk kvm_vps_sync_read;		/*call the service*/	\
-	;;								\
-};									\
-
-
-#define KVM_MINSTATE_GET_CURRENT(reg)   mov reg=r21
-
-/*
- * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- *  psr.ic: off
- *  r31:	contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- *  psr.ic: off
- *   r2 = points to &pt_regs.r16
- *   r8 = contents of ar.ccv
- *   r9 = contents of ar.csd
- *  r10 = contents of ar.ssd
- *  r11 = FPSR_DEFAULT
- *  r12 = kernel sp (kernel virtual address)
- *  r13 = points to current task_struct (kernel virtual address)
- *  p15 = TRUE if psr.i is set in cr.ipsr
- *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
- *	  preserved
- *
- * Note that psr.ic is NOT turned on by this macro.  This is so that
- * we can pass interruption state as arguments to a handler.
- */
-
-
-#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
-
-#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)			\
-	KVM_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */	\
-	mov r27 = ar.rsc;         /* M */			\
-	mov r20 = r1;         /* A */				\
-	mov r25 = ar.unat;        /* M */			\
-	mov r29 = cr.ipsr;        /* M */			\
-	mov r26 = ar.pfs;         /* I */			\
-	mov r18 = cr.isr;         				\
-	COVER;              /* B;; (or nothing) */		\
-	;;							\
-	tbit.z p0,p15 = r29,IA64_PSR_I_BIT;			\
-	mov r1 = r16;						\
-/*	mov r21=r16;	*/					\
-	/* switch from user to kernel RBS: */			\
-	;;							\
-	invala;             /* M */				\
-	SAVE_IFS;						\
-	;;							\
-	KVM_MINSTATE_START_SAVE_MIN				\
-	adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */	\
-	adds r16 = PT(CR_IPSR),r1;				\
-	;;							\
-	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;		\
-	st8 [r16] = r29;      /* save cr.ipsr */		\
-	;;							\
-	lfetch.fault.excl.nt1 [r17];				\
-	tbit.nz p15,p0 = r29,IA64_PSR_I_BIT;			\
-	mov r29 = b0						\
-	;;							\
-	adds r16 = PT(R8),r1; /* initialize first base pointer */\
-	adds r17 = PT(R9),r1; /* initialize second base pointer */\
-	;;							\
-.mem.offset 0,0; st8.spill [r16] = r8,16;			\
-.mem.offset 8,0; st8.spill [r17] = r9,16;			\
-	;;							\
-.mem.offset 0,0; st8.spill [r16] = r10,24;			\
-.mem.offset 8,0; st8.spill [r17] = r11,24;			\
-	;;							\
-	mov r9 = cr.iip;         /* M */			\
-	mov r10 = ar.fpsr;        /* M */			\
-	;;							\
-	st8 [r16] = r9,16;    /* save cr.iip */			\
-	st8 [r17] = r30,16;   /* save cr.ifs */			\
-	sub r18 = r18,r22;    /* r18=RSE.ndirty*8 */		\
-	;;							\
-	st8 [r16] = r25,16;   /* save ar.unat */		\
-	st8 [r17] = r26,16;    /* save ar.pfs */		\
-	shl r18 = r18,16;     /* calu ar.rsc used for "loadrs" */\
-	;;							\
-	st8 [r16] = r27,16;   /* save ar.rsc */			\
-	st8 [r17] = r28,16;   /* save ar.rnat */		\
-	;;          /* avoid RAW on r16 & r17 */		\
-	st8 [r16] = r23,16;   /* save ar.bspstore */		\
-	st8 [r17] = r31,16;   /* save predicates */		\
-	;;							\
-	st8 [r16] = r29,16;   /* save b0 */			\
-	st8 [r17] = r18,16;   /* save ar.rsc value for "loadrs" */\
-	;;							\
-.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */  \
-.mem.offset 8,0; st8.spill [r17] = r12,16;			\
-	adds r12 = -16,r1;    /* switch to kernel memory stack */  \
-	;;							\
-.mem.offset 0,0; st8.spill [r16] = r13,16;			\
-.mem.offset 8,0; st8.spill [r17] = r10,16;	/* save ar.fpsr */\
-	mov r13 = r21;   /* establish `current' */		\
-	;;							\
-.mem.offset 0,0; st8.spill [r16] = r15,16;			\
-.mem.offset 8,0; st8.spill [r17] = r14,16;			\
-	;;							\
-.mem.offset 0,0; st8.spill [r16] = r2,16;			\
-.mem.offset 8,0; st8.spill [r17] = r3,16;			\
-	adds r2 = VMM_PT_REGS_R16_OFFSET,r1;			\
-	 ;;							\
-	adds r16 = VMM_VCPU_IIPA_OFFSET,r13;			\
-	adds r17 = VMM_VCPU_ISR_OFFSET,r13;			\
-	mov r26 = cr.iipa;					\
-	mov r27 = cr.isr;					\
-	;;							\
-	st8 [r16] = r26;					\
-	st8 [r17] = r27;					\
-	;;							\
-	EXTRA;							\
-	mov r8 = ar.ccv;					\
-	mov r9 = ar.csd;					\
-	mov r10 = ar.ssd;					\
-	movl r11 = FPSR_DEFAULT;   /* L-unit */			\
-	adds r17 = VMM_VCPU_GP_OFFSET,r13;			\
-	;;							\
-	ld8 r1 = [r17];/* establish kernel global pointer */	\
-	;;							\
-	PAL_VSA_SYNC_READ					\
-	KVM_MINSTATE_END_SAVE_MIN
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
- *
- * Assumed state upon entry:
- *  psr.ic: on
- *  r2: points to &pt_regs.f6
- *  r3: points to &pt_regs.f7
- *  r8: contents of ar.ccv
- *  r9: contents of ar.csd
- *  r10:	contents of ar.ssd
- *  r11:	FPSR_DEFAULT
- *
- * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
- */
-#define KVM_SAVE_REST				\
-.mem.offset 0,0; st8.spill [r2] = r16,16;	\
-.mem.offset 8,0; st8.spill [r3] = r17,16;	\
-	;;				\
-.mem.offset 0,0; st8.spill [r2] = r18,16;	\
-.mem.offset 8,0; st8.spill [r3] = r19,16;	\
-	;;				\
-.mem.offset 0,0; st8.spill [r2] = r20,16;	\
-.mem.offset 8,0; st8.spill [r3] = r21,16;	\
-	mov r18=b6;			\
-	;;				\
-.mem.offset 0,0; st8.spill [r2] = r22,16;	\
-.mem.offset 8,0; st8.spill [r3] = r23,16;	\
-	mov r19 = b7;				\
-	;;					\
-.mem.offset 0,0; st8.spill [r2] = r24,16;	\
-.mem.offset 8,0; st8.spill [r3] = r25,16;	\
-	;;					\
-.mem.offset 0,0; st8.spill [r2] = r26,16;	\
-.mem.offset 8,0; st8.spill [r3] = r27,16;	\
-	;;					\
-.mem.offset 0,0; st8.spill [r2] = r28,16;	\
-.mem.offset 8,0; st8.spill [r3] = r29,16;	\
-	;;					\
-.mem.offset 0,0; st8.spill [r2] = r30,16;	\
-.mem.offset 8,0; st8.spill [r3] = r31,32;	\
-	;;					\
-	mov ar.fpsr = r11;			\
-	st8 [r2] = r8,8;			\
-	adds r24 = PT(B6)-PT(F7),r3;		\
-	adds r25 = PT(B7)-PT(F7),r3;		\
-	;;					\
-	st8 [r24] = r18,16;       /* b6 */	\
-	st8 [r25] = r19,16;       /* b7 */	\
-	adds r2 = PT(R4)-PT(F6),r2;		\
-	adds r3 = PT(R5)-PT(F7),r3;		\
-	;;					\
-	st8 [r24] = r9;	/* ar.csd */		\
-	st8 [r25] = r10;	/* ar.ssd */	\
-	;;					\
-	mov r18 = ar.unat;			\
-	adds r19 = PT(EML_UNAT)-PT(R4),r2;	\
-	;;					\
-	st8 [r19] = r18; /* eml_unat */ 	\
-
-
-#define KVM_SAVE_EXTRA				\
-.mem.offset 0,0; st8.spill [r2] = r4,16;	\
-.mem.offset 8,0; st8.spill [r3] = r5,16;	\
-	;;					\
-.mem.offset 0,0; st8.spill [r2] = r6,16;	\
-.mem.offset 8,0; st8.spill [r3] = r7;		\
-	;;					\
-	mov r26 = ar.unat;			\
-	;;					\
-	st8 [r2] = r26;/* eml_unat */ 		\
-
-#define KVM_SAVE_MIN_WITH_COVER		KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
-#define KVM_SAVE_MIN_WITH_COVER_R19	KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
-#define KVM_SAVE_MIN			KVM_DO_SAVE_MIN(     , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
deleted file mode 100644
index c5f92a9..0000000
--- a/arch/ia64/kvm/lapic.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __KVM_IA64_LAPIC_H
-#define __KVM_IA64_LAPIC_H
-
-#include <linux/kvm_host.h>
-
-/*
- * vlsapic
- */
-struct kvm_lapic{
-	struct kvm_vcpu *vcpu;
-	uint64_t insvc[4];
-	uint64_t vhpi;
-	uint8_t xtp;
-	uint8_t pal_init_pending;
-	uint8_t pad[2];
-};
-
-int kvm_create_lapic(struct kvm_vcpu *vcpu);
-void kvm_free_lapic(struct kvm_vcpu *vcpu);
-
-int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
-int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-		int short_hand, int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
-#define kvm_apic_present(x) (true)
-#define kvm_lapic_enabled(x) (true)
-
-#endif
diff --git a/arch/ia64/kvm/memcpy.S b/arch/ia64/kvm/memcpy.S
deleted file mode 100644
index c04cdbe..0000000
--- a/arch/ia64/kvm/memcpy.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../lib/memcpy.S"
diff --git a/arch/ia64/kvm/memset.S b/arch/ia64/kvm/memset.S
deleted file mode 100644
index 83c3066..0000000
--- a/arch/ia64/kvm/memset.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../lib/memset.S"
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
deleted file mode 100644
index dd979e0..0000000
--- a/arch/ia64/kvm/misc.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef __KVM_IA64_MISC_H
-#define __KVM_IA64_MISC_H
-
-#include <linux/kvm_host.h>
-/*
- * misc.h
- * 	Copyright (C) 2007, Intel Corporation.
- *  	Xiantao Zhang  (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- *Return p2m base address at host side!
- */
-static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
-{
-	return (uint64_t *)(kvm->arch.vm_base +
-				offsetof(struct kvm_vm_data, kvm_p2m));
-}
-
-static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
-		u64 paddr, u64 mem_flags)
-{
-	uint64_t *pmt_base = kvm_host_get_pmt(kvm);
-	unsigned long pte;
-
-	pte = PAGE_ALIGN(paddr) | mem_flags;
-	pmt_base[gfn] = pte;
-}
-
-/*Function for translating host address to guest address*/
-
-static inline void *to_guest(struct kvm *kvm, void *addr)
-{
-	return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
-			KVM_VM_DATA_BASE);
-}
-
-/*Function for translating guest address to host address*/
-
-static inline void *to_host(struct kvm *kvm, void *addr)
-{
-	return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
-			+ kvm->arch.vm_base);
-}
-
-/* Get host context of the vcpu */
-static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
-{
-	union context *ctx = &vcpu->arch.host;
-	return to_guest(vcpu->kvm, ctx);
-}
-
-/* Get guest context of the vcpu */
-static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
-{
-	union context *ctx = &vcpu->arch.guest;
-	return  to_guest(vcpu->kvm, ctx);
-}
-
-/* kvm get exit data from gvmm! */
-static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
-{
-	return &vcpu->arch.exit_data;
-}
-
-/*kvm get vcpu ioreq for kvm module!*/
-static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p_ctl_data;
-
-	if (vcpu) {
-		p_ctl_data = kvm_get_exit_data(vcpu);
-		if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
-			return &p_ctl_data->u.ioreq;
-	}
-
-	return NULL;
-}
-
-#endif
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
deleted file mode 100644
index f1e17d3..0000000
--- a/arch/ia64/kvm/mmio.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * mmio.c: MMIO emulation components.
- * Copyright (c) 2004, Intel Corporation.
- *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
- *
- * Copyright (c) 2007 Intel Corporation  KVM support.
- * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
- * Xiantao Zhang  (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-#include <linux/kvm_host.h>
-
-#include "vcpu.h"
-
-static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
-{
-	VLSAPIC_XTP(v) = val;
-}
-
-/*
- * LSAPIC OFFSET
- */
-#define PIB_LOW_HALF(ofst)     !(ofst & (1 << 20))
-#define PIB_OFST_INTA          0x1E0000
-#define PIB_OFST_XTP           0x1E0008
-
-/*
- * execute write IPI op.
- */
-static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
-					uint64_t addr, uint64_t data)
-{
-	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
-	unsigned long psr;
-
-	local_irq_save(psr);
-
-	p->exit_reason = EXIT_REASON_IPI;
-	p->u.ipi_data.addr.val = addr;
-	p->u.ipi_data.data.val = data;
-	vmm_transition(current_vcpu);
-
-	local_irq_restore(psr);
-
-}
-
-void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
-			unsigned long length, unsigned long val)
-{
-	addr &= (PIB_SIZE - 1);
-
-	switch (addr) {
-	case PIB_OFST_INTA:
-		panic_vm(v, "Undefined write on PIB INTA\n");
-		break;
-	case PIB_OFST_XTP:
-		if (length == 1) {
-			vlsapic_write_xtp(v, val);
-		} else {
-			panic_vm(v, "Undefined write on PIB XTP\n");
-		}
-		break;
-	default:
-		if (PIB_LOW_HALF(addr)) {
-			/*Lower half */
-			if (length != 8)
-				panic_vm(v, "Can't LHF write with size %ld!\n",
-						length);
-			else
-				vlsapic_write_ipi(v, addr, val);
-		} else {   /*Upper half */
-			panic_vm(v, "IPI-UHF write %lx\n", addr);
-		}
-		break;
-	}
-}
-
-unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
-		unsigned long length)
-{
-	uint64_t result = 0;
-
-	addr &= (PIB_SIZE - 1);
-
-	switch (addr) {
-	case PIB_OFST_INTA:
-		if (length == 1) /* 1 byte load */
-			; /* There is no i8259, there is no INTA access*/
-		else
-			panic_vm(v, "Undefined read on PIB INTA\n");
-
-		break;
-	case PIB_OFST_XTP:
-		if (length == 1) {
-			result = VLSAPIC_XTP(v);
-		} else {
-			panic_vm(v, "Undefined read on PIB XTP\n");
-		}
-		break;
-	default:
-		panic_vm(v, "Undefined addr access for lsapic!\n");
-		break;
-	}
-	return result;
-}
-
-static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
-					u16 s, int ma, int dir)
-{
-	unsigned long iot;
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-	unsigned long psr;
-
-	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
-
-	local_irq_save(psr);
-
-	/*Intercept the access for PIB range*/
-	if (iot == GPFN_PIB) {
-		if (!dir)
-			lsapic_write(vcpu, src_pa, s, *dest);
-		else
-			*dest = lsapic_read(vcpu, src_pa, s);
-		goto out;
-	}
-	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
-	p->u.ioreq.addr = src_pa;
-	p->u.ioreq.size = s;
-	p->u.ioreq.dir = dir;
-	if (dir == IOREQ_WRITE)
-		p->u.ioreq.data = *dest;
-	p->u.ioreq.state = STATE_IOREQ_READY;
-	vmm_transition(vcpu);
-
-	if (p->u.ioreq.state == STATE_IORESP_READY) {
-		if (dir == IOREQ_READ)
-			/* it's necessary to ensure zero extending */
-			*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
-	} else
-		panic_vm(vcpu, "Unhandled mmio access returned!\n");
-out:
-	local_irq_restore(psr);
-	return ;
-}
-
-/*
-   dir 1: read 0:write
-   inst_type 0:integer 1:floating point
- */
-#define SL_INTEGER	0	/* store/load interger*/
-#define SL_FLOATING	1     	/* store/load floating*/
-
-void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
-{
-	struct kvm_pt_regs *regs;
-	IA64_BUNDLE bundle;
-	int slot, dir = 0;
-	int inst_type = -1;
-	u16 size = 0;
-	u64 data, slot1a, slot1b, temp, update_reg;
-	s32 imm;
-	INST64 inst;
-
-	regs = vcpu_regs(vcpu);
-
-	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
-		/* if fetch code fail, return and try again */
-		return;
-	}
-	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
-	if (!slot)
-		inst.inst = bundle.slot0;
-	else if (slot == 1) {
-		slot1a = bundle.slot1a;
-		slot1b = bundle.slot1b;
-		inst.inst = slot1a + (slot1b << 18);
-	} else if (slot == 2)
-		inst.inst = bundle.slot2;
-
-	/* Integer Load/Store */
-	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
-		inst_type = SL_INTEGER;
-		size = (inst.M1.x6 & 0x3);
-		if ((inst.M1.x6 >> 2) > 0xb) {
-			/*write*/
-			dir = IOREQ_WRITE;
-			data = vcpu_get_gr(vcpu, inst.M4.r2);
-		} else if ((inst.M1.x6 >> 2) < 0xb) {
-			/*read*/
-			dir = IOREQ_READ;
-		}
-	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
-		/* Integer Load + Reg update */
-		inst_type = SL_INTEGER;
-		dir = IOREQ_READ;
-		size = (inst.M2.x6 & 0x3);
-		temp = vcpu_get_gr(vcpu, inst.M2.r3);
-		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
-		temp += update_reg;
-		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
-	} else if (inst.M3.major == 5) {
-		/*Integer Load/Store + Imm update*/
-		inst_type = SL_INTEGER;
-		size = (inst.M3.x6&0x3);
-		if ((inst.M5.x6 >> 2) > 0xb) {
-			/*write*/
-			dir = IOREQ_WRITE;
-			data = vcpu_get_gr(vcpu, inst.M5.r2);
-			temp = vcpu_get_gr(vcpu, inst.M5.r3);
-			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
-				(inst.M5.imm7 << 23);
-			temp += imm >> 23;
-			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
-
-		} else if ((inst.M3.x6 >> 2) < 0xb) {
-			/*read*/
-			dir = IOREQ_READ;
-			temp = vcpu_get_gr(vcpu, inst.M3.r3);
-			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
-				(inst.M3.imm7 << 23);
-			temp += imm >> 23;
-			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
-
-		}
-	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
-				&& inst.M9.m == 0 && inst.M9.x == 0) {
-		/* Floating-point spill*/
-		struct ia64_fpreg v;
-
-		inst_type = SL_FLOATING;
-		dir = IOREQ_WRITE;
-		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
-		/* Write high word. FIXME: this is a kludge!  */
-		v.u.bits[1] &= 0x3ffff;
-		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
-			    ma, IOREQ_WRITE);
-		data = v.u.bits[0];
-		size = 3;
-	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
-		/* Floating-point spill + Imm update */
-		struct ia64_fpreg v;
-
-		inst_type = SL_FLOATING;
-		dir = IOREQ_WRITE;
-		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
-		temp = vcpu_get_gr(vcpu, inst.M10.r3);
-		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
-			(inst.M10.imm7 << 23);
-		temp += imm >> 23;
-		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
-
-		/* Write high word.FIXME: this is a kludge!  */
-		v.u.bits[1] &= 0x3ffff;
-		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
-			    8, ma, IOREQ_WRITE);
-		data = v.u.bits[0];
-		size = 3;
-	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
-		/* Floating-point stf8 + Imm update */
-		struct ia64_fpreg v;
-		inst_type = SL_FLOATING;
-		dir = IOREQ_WRITE;
-		size = 3;
-		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
-		data = v.u.bits[0]; /* Significand.  */
-		temp = vcpu_get_gr(vcpu, inst.M10.r3);
-		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
-			(inst.M10.imm7 << 23);
-		temp += imm >> 23;
-		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
-	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
-			&& inst.M15.x6 <= 0x2f) {
-		temp = vcpu_get_gr(vcpu, inst.M15.r3);
-		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
-			(inst.M15.imm7 << 23);
-		temp += imm >> 23;
-		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
-
-		vcpu_increment_iip(vcpu);
-		return;
-	} else if (inst.M12.major == 6 && inst.M12.m == 1
-			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
-		/* Floating-point Load Pair + Imm ldfp8 M12*/
-		struct ia64_fpreg v;
-
-		inst_type = SL_FLOATING;
-		dir = IOREQ_READ;
-		size = 8;     /*ldfd*/
-		mmio_access(vcpu, padr, &data, size, ma, dir);
-		v.u.bits[0] = data;
-		v.u.bits[1] = 0x1003E;
-		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
-		padr += 8;
-		mmio_access(vcpu, padr, &data, size, ma, dir);
-		v.u.bits[0] = data;
-		v.u.bits[1] = 0x1003E;
-		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
-		padr += 8;
-		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
-		vcpu_increment_iip(vcpu);
-		return;
-	} else {
-		inst_type = -1;
-		panic_vm(vcpu, "Unsupported MMIO access instruction! "
-				"Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
-				bundle.i64[0], bundle.i64[1]);
-	}
-
-	size = 1 << size;
-	if (dir == IOREQ_WRITE) {
-		mmio_access(vcpu, padr, &data, size, ma, dir);
-	} else {
-		mmio_access(vcpu, padr, &data, size, ma, dir);
-		if (inst_type == SL_INTEGER)
-			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
-		else
-			panic_vm(vcpu, "Unsupported instruction type!\n");
-
-	}
-	vcpu_increment_iip(vcpu);
-}
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
deleted file mode 100644
index f793be3..0000000
--- a/arch/ia64/kvm/optvfault.S
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*
- * arch/ia64/kvm/optvfault.S
- * optimize virtualization fault handler
- *
- * Copyright (C) 2006 Intel Co
- *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
- * Copyright (C) 2008 Intel Co
- *      Add the support for Tukwila processors.
- *	Xiantao Zhang <xiantao.zhang@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include <asm/processor.h>
-#include <asm/kvm_host.h>
-
-#include "vti.h"
-#include "asm-offsets.h"
-
-#define ACCE_MOV_FROM_AR
-#define ACCE_MOV_FROM_RR
-#define ACCE_MOV_TO_RR
-#define ACCE_RSM
-#define ACCE_SSM
-#define ACCE_MOV_TO_PSR
-#define ACCE_THASH
-
-#define VMX_VPS_SYNC_READ			\
-	add r16=VMM_VPD_BASE_OFFSET,r21;	\
-	mov r17 = b0;				\
-	mov r18 = r24;				\
-	mov r19 = r25;				\
-	mov r20 = r31;				\
-	;;					\
-{.mii;						\
-	ld8 r16 = [r16];			\
-	nop 0x0;				\
-	mov r24 = ip;				\
-	;;					\
-};						\
-{.mmb;						\
-	add r24=0x20, r24;			\
-	mov r25 =r16;				\
-	br.sptk.many kvm_vps_sync_read;		\
-};						\
-	mov b0 = r17;				\
-	mov r24 = r18;				\
-	mov r25 = r19;				\
-	mov r31 = r20
-
-ENTRY(kvm_vps_entry)
-	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
-	;;
-	ld8 r29 = [r29]
-	;;
-	add r29 = r29, r30
-	;;
-	mov b0 = r29
-	br.sptk.many b0
-END(kvm_vps_entry)
-
-/*
- *	Inputs:
- *	r24 : return address
- *  	r25 : vpd
- *	r29 : scratch
- *
- */
-GLOBAL_ENTRY(kvm_vps_sync_read)
-	movl r30 = PAL_VPS_SYNC_READ
-	;;
-	br.sptk.many kvm_vps_entry
-END(kvm_vps_sync_read)
-
-/*
- *	Inputs:
- *	r24 : return address
- *  	r25 : vpd
- *	r29 : scratch
- *
- */
-GLOBAL_ENTRY(kvm_vps_sync_write)
-	movl r30 = PAL_VPS_SYNC_WRITE
-	;;
-	br.sptk.many kvm_vps_entry
-END(kvm_vps_sync_write)
-
-/*
- *	Inputs:
- *	r23 : pr
- *	r24 : guest b0
- *  	r25 : vpd
- *
- */
-GLOBAL_ENTRY(kvm_vps_resume_normal)
-	movl r30 = PAL_VPS_RESUME_NORMAL
-	;;
-	mov pr=r23,-2
-	br.sptk.many kvm_vps_entry
-END(kvm_vps_resume_normal)
-
-/*
- *	Inputs:
- *	r23 : pr
- *	r24 : guest b0
- *  	r25 : vpd
- *	r17 : isr
- */
-GLOBAL_ENTRY(kvm_vps_resume_handler)
-	movl r30 = PAL_VPS_RESUME_HANDLER
-	;;
-	ld8 r26=[r25]
-	shr r17=r17,IA64_ISR_IR_BIT
-	;;
-	dep r26=r17,r26,63,1   // bit 63 of r26 indicate whether enable CFLE
-	mov pr=r23,-2
-	br.sptk.many kvm_vps_entry
-END(kvm_vps_resume_handler)
-
-//mov r1=ar3
-GLOBAL_ENTRY(kvm_asm_mov_from_ar)
-#ifndef ACCE_MOV_FROM_AR
-	br.many kvm_virtualization_fault_back
-#endif
-	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
-	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
-	extr.u r17=r25,6,7
-	;;
-	ld8 r18=[r18]
-	mov r19=ar.itc
-	mov r24=b0
-	;;
-	add r19=r19,r18
-	addl r20=@gprel(asm_mov_to_reg),gp
-	;;
-	st8 [r16] = r19
-	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
-	shladd r17=r17,4,r20
-	;;
-	mov b0=r17
-	br.sptk.few b0
-	;;
-END(kvm_asm_mov_from_ar)
-
-/*
- * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
- * clock as it's source for emulating the ITC. This version will be
- * copied on top of the original version if the host is determined to
- * be an SN2.
- */
-GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
-	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
-	movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
-
-	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
-	extr.u r17=r25,6,7
-	mov r24=b0
-	;;
-	ld8 r18=[r18]
-	ld8 r19=[r19]
-	addl r20=@gprel(asm_mov_to_reg),gp
-	;;
-	add r19=r19,r18
-	shladd r17=r17,4,r20
-	;;
-	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
-	st8 [r16] = r19
-	mov b0=r17
-	br.sptk.few b0
-	;;
-END(kvm_asm_mov_from_ar_sn2)
-
-
-
-// mov r1=rr[r3]
-GLOBAL_ENTRY(kvm_asm_mov_from_rr)
-#ifndef ACCE_MOV_FROM_RR
-	br.many kvm_virtualization_fault_back
-#endif
-	extr.u r16=r25,20,7
-	extr.u r17=r25,6,7
-	addl r20=@gprel(asm_mov_from_reg),gp
-	;;
-	adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
-	shladd r16=r16,4,r20
-	mov r24=b0
-	;;
-	add r27=VMM_VCPU_VRR0_OFFSET,r21
-	mov b0=r16
-	br.many b0
-	;;
-kvm_asm_mov_from_rr_back_1:
-	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
-	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
-	shr.u r26=r19,61
-	;;
-	shladd r17=r17,4,r22
-	shladd r27=r26,3,r27
-	;;
-	ld8 r19=[r27]
-	mov b0=r17
-	br.many b0
-END(kvm_asm_mov_from_rr)
-
-
-// mov rr[r3]=r2
-GLOBAL_ENTRY(kvm_asm_mov_to_rr)
-#ifndef ACCE_MOV_TO_RR
-	br.many kvm_virtualization_fault_back
-#endif
-	extr.u r16=r25,20,7
-	extr.u r17=r25,13,7
-	addl r20=@gprel(asm_mov_from_reg),gp
-	;;
-	adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
-	shladd r16=r16,4,r20
-	mov r22=b0
-	;;
-	add r27=VMM_VCPU_VRR0_OFFSET,r21
-	mov b0=r16
-	br.many b0
-	;;
-kvm_asm_mov_to_rr_back_1:
-	adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
-	shr.u r23=r19,61
-	shladd r17=r17,4,r20
-	;;
-	//if rr6, go back
-	cmp.eq p6,p0=6,r23
-	mov b0=r22
-	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
-	;;
-	mov r28=r19
-	mov b0=r17
-	br.many b0
-kvm_asm_mov_to_rr_back_2:
-	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
-	shladd r27=r23,3,r27
-	;; // vrr.rid<<4 |0xe
-	st8 [r27]=r19
-	mov b0=r30
-	;;
-	extr.u r16=r19,8,26
-	extr.u r18 =r19,2,6
-	mov r17 =0xe
-	;;
-	shladd r16 = r16, 4, r17
-	extr.u r19 =r19,0,8
-	;;
-	shl r16 = r16,8
-	;;
-	add r19 = r19, r16
-	;; //set ve 1
-	dep r19=-1,r19,0,1
-	cmp.lt p6,p0=14,r18
-	;;
-	(p6) mov r18=14
-	;;
-	(p6) dep r19=r18,r19,2,6
-	;;
-	cmp.eq p6,p0=0,r23
-	;;
-	cmp.eq.or p6,p0=4,r23
-	;;
-	adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
-	(p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
-	;;
-	ld4 r16=[r16]
-	cmp.eq p7,p0=r0,r0
-	(p6) shladd r17=r23,1,r17
-	;;
-	(p6) st8 [r17]=r19
-	(p6) tbit.nz p6,p7=r16,0
-	;;
-	(p7) mov rr[r28]=r19
-	mov r24=r22
-	br.many b0
-END(kvm_asm_mov_to_rr)
-
-
-//rsm
-GLOBAL_ENTRY(kvm_asm_rsm)
-#ifndef ACCE_RSM
-	br.many kvm_virtualization_fault_back
-#endif
-	VMX_VPS_SYNC_READ
-	;;
-	extr.u r26=r25,6,21
-	extr.u r27=r25,31,2
-	;;
-	extr.u r28=r25,36,1
-	dep r26=r27,r26,21,2
-	;;
-	add r17=VPD_VPSR_START_OFFSET,r16
-	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
-	//r26 is imm24
-	dep r26=r28,r26,23,1
-	;;
-	ld8 r18=[r17]
-	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
-	ld4 r23=[r22]
-	sub r27=-1,r26
-	mov r24=b0
-	;;
-	mov r20=cr.ipsr
-	or r28=r27,r28
-	and r19=r18,r27
-	;;
-	st8 [r17]=r19
-	and r20=r20,r28
-	/* Comment it out due to short of fp lazy alorgithm support
-	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
-	;;
-	ld8 r27=[r27]
-	;;
-	tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
-	;;
-	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
-	*/
-	;;
-	mov cr.ipsr=r20
-	tbit.nz p6,p0=r23,0
-	;;
-	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-	(p6) br.dptk kvm_resume_to_guest_with_sync
-	;;
-	add r26=VMM_VCPU_META_RR0_OFFSET,r21
-	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
-	dep r23=-1,r23,0,1
-	;;
-	ld8 r26=[r26]
-	ld8 r27=[r27]
-	st4 [r22]=r23
-	dep.z r28=4,61,3
-	;;
-	mov rr[r0]=r26
-	;;
-	mov rr[r28]=r27
-	;;
-	srlz.d
-	br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_rsm)
-
-
-//ssm
-GLOBAL_ENTRY(kvm_asm_ssm)
-#ifndef ACCE_SSM
-	br.many kvm_virtualization_fault_back
-#endif
-	VMX_VPS_SYNC_READ
-	;;
-	extr.u r26=r25,6,21
-	extr.u r27=r25,31,2
-	;;
-	extr.u r28=r25,36,1
-	dep r26=r27,r26,21,2
-	;;  //r26 is imm24
-	add r27=VPD_VPSR_START_OFFSET,r16
-	dep r26=r28,r26,23,1
-	;;  //r19 vpsr
-	ld8 r29=[r27]
-	mov r24=b0
-	;;
-	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
-	mov r20=cr.ipsr
-	or r19=r29,r26
-	;;
-	ld4 r23=[r22]
-	st8 [r27]=r19
-	or r20=r20,r26
-	;;
-	mov cr.ipsr=r20
-	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
-	;;
-	and r19=r28,r19
-	tbit.z p6,p0=r23,0
-	;;
-	cmp.ne.or p6,p0=r28,r19
-	(p6) br.dptk kvm_asm_ssm_1
-	;;
-	add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
-	add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
-	dep r23=0,r23,0,1
-	;;
-	ld8 r26=[r26]
-	ld8 r27=[r27]
-	st4 [r22]=r23
-	dep.z r28=4,61,3
-	;;
-	mov rr[r0]=r26
-	;;
-	mov rr[r28]=r27
-	;;
-	srlz.d
-	;;
-kvm_asm_ssm_1:
-	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
-	;;
-	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-	(p6) br.dptk kvm_resume_to_guest_with_sync
-	;;
-	add r29=VPD_VTPR_START_OFFSET,r16
-	add r30=VPD_VHPI_START_OFFSET,r16
-	;;
-	ld8 r29=[r29]
-	ld8 r30=[r30]
-	;;
-	extr.u r17=r29,4,4
-	extr.u r18=r29,16,1
-	;;
-	dep r17=r18,r17,4,1
-	;;
-	cmp.gt p6,p0=r30,r17
-	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_ssm)
-
-
-//mov psr.l=r2
-GLOBAL_ENTRY(kvm_asm_mov_to_psr)
-#ifndef ACCE_MOV_TO_PSR
-	br.many kvm_virtualization_fault_back
-#endif
-	VMX_VPS_SYNC_READ
-	;;
-	extr.u r26=r25,13,7 //r2
-	addl r20=@gprel(asm_mov_from_reg),gp
-	;;
-	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
-	shladd r26=r26,4,r20
-	mov r24=b0
-	;;
-	add r27=VPD_VPSR_START_OFFSET,r16
-	mov b0=r26
-	br.many b0
-	;;
-kvm_asm_mov_to_psr_back:
-	ld8 r17=[r27]
-	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
-	dep r19=0,r19,32,32
-	;;
-	ld4 r23=[r22]
-	dep r18=0,r17,0,32
-	;;
-	add r30=r18,r19
-	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
-	;;
-	st8 [r27]=r30
-	and r27=r28,r30
-	and r29=r28,r17
-	;;
-	cmp.eq p5,p0=r29,r27
-	cmp.eq p6,p7=r28,r27
-	(p5) br.many kvm_asm_mov_to_psr_1
-	;;
-	//virtual to physical
-	(p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
-	(p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
-	(p7) dep r23=-1,r23,0,1
-	;;
-	//physical to virtual
-	(p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
-	(p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
-	(p6) dep r23=0,r23,0,1
-	;;
-	ld8 r26=[r26]
-	ld8 r27=[r27]
-	st4 [r22]=r23
-	dep.z r28=4,61,3
-	;;
-	mov rr[r0]=r26
-	;;
-	mov rr[r28]=r27
-	;;
-	srlz.d
-	;;
-kvm_asm_mov_to_psr_1:
-	mov r20=cr.ipsr
-	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
-	;;
-	or r19=r19,r28
-	dep r20=0,r20,0,32
-	;;
-	add r20=r19,r20
-	mov b0=r24
-	;;
-	/* Comment it out due to short of fp lazy algorithm support
-	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
-	;;
-	ld8 r27=[r27]
-	;;
-	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
-	;;
-	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
-	;;
-	*/
-	mov cr.ipsr=r20
-	cmp.ne p6,p0=r0,r0
-	;;
-	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
-	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-	(p6) br.dpnt.few kvm_resume_to_guest_with_sync
-	;;
-	add r29=VPD_VTPR_START_OFFSET,r16
-	add r30=VPD_VHPI_START_OFFSET,r16
-	;;
-	ld8 r29=[r29]
-	ld8 r30=[r30]
-	;;
-	extr.u r17=r29,4,4
-	extr.u r18=r29,16,1
-	;;
-	dep r17=r18,r17,4,1
-	;;
-	cmp.gt p6,p0=r30,r17
-	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest_with_sync
-END(kvm_asm_mov_to_psr)
-
-
-ENTRY(kvm_asm_dispatch_vexirq)
-//increment iip
-	mov r17 = b0
-	mov r18 = r31
-{.mii
-	add r25=VMM_VPD_BASE_OFFSET,r21
-	nop 0x0
-	mov r24 = ip
-	;;
-}
-{.mmb
-	add r24 = 0x20, r24
-	ld8 r25 = [r25]
-	br.sptk.many kvm_vps_sync_write
-}
-	mov b0 =r17
-	mov r16=cr.ipsr
-	mov r31 = r18
-	mov r19 = 37
-	;;
-	extr.u r17=r16,IA64_PSR_RI_BIT,2
-	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
-	;;
-	(p6) mov r18=cr.iip
-	(p6) mov r17=r0
-	(p7) add r17=1,r17
-	;;
-	(p6) add r18=0x10,r18
-	dep r16=r17,r16,IA64_PSR_RI_BIT,2
-	;;
-	(p6) mov cr.iip=r18
-	mov cr.ipsr=r16
-	mov r30 =1
-	br.many kvm_dispatch_vexirq
-END(kvm_asm_dispatch_vexirq)
-
-// thash
-// TODO: add support when pta.vf = 1
-GLOBAL_ENTRY(kvm_asm_thash)
-#ifndef ACCE_THASH
-	br.many kvm_virtualization_fault_back
-#endif
-	extr.u r17=r25,20,7		// get r3 from opcode in r25
-	extr.u r18=r25,6,7		// get r1 from opcode in r25
-	addl r20=@gprel(asm_mov_from_reg),gp
-	;;
-	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
-	shladd r17=r17,4,r20	// get addr of MOVE_FROM_REG(r17)
-	adds r16=VMM_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
-	;;
-	mov r24=b0
-	;;
-	ld8 r16=[r16]		// get VPD addr
-	mov b0=r17
-	br.many b0			// r19 return value
-	;;
-kvm_asm_thash_back1:
-	shr.u r23=r19,61		// get RR number
-	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
-	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
-	;;
-	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
-	ld8 r17=[r16]		// get PTA
-	mov r26=1
-	;;
-	extr.u r29=r17,2,6	// get pta.size
-	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
-	;;
-	mov b0=r24
-	//Fallback to C if pta.vf is set
-	tbit.nz p6,p0=r17, 8
-	;;
-	(p6) mov r24=EVENT_THASH
-	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
-	extr.u r28=r28,2,6	// get rr.ps
-	shl r22=r26,r29		// 1UL << pta.size
-	;;
-	shr.u r23=r19,r28	// vaddr >> rr.ps
-	adds r26=3,r29		// pta.size + 3
-	shl r27=r17,3		// pta << 3
-	;;
-	shl r23=r23,3		// (vaddr >> rr.ps) << 3
-	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
-	movl r16=7<<61
-	;;
-	adds r22=-1,r22		// (1UL << pta.size) - 1
-	shl r27=r27,r29		// ((pta<<3)>>(pta.size+3))<<pta.size
-	and r19=r19,r16		// vaddr & VRN_MASK
-	;;
-	and r22=r22,r23		// vhpt_offset
-	or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
-	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
-	;;
-	or r19=r19,r22		// calc pval
-	shladd r17=r18,4,r26
-	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
-	;;
-	mov b0=r17
-	br.many b0
-END(kvm_asm_thash)
-
-#define MOV_TO_REG0	\
-{;			\
-	nop.b 0x0;		\
-	nop.b 0x0;		\
-	nop.b 0x0;		\
-	;;			\
-};
-
-
-#define MOV_TO_REG(n)	\
-{;			\
-	mov r##n##=r19;	\
-	mov b0=r30;	\
-	br.sptk.many b0;	\
-	;;			\
-};
-
-
-#define MOV_FROM_REG(n)	\
-{;				\
-	mov r19=r##n##;		\
-	mov b0=r30;		\
-	br.sptk.many b0;		\
-	;;				\
-};
-
-
-#define MOV_TO_BANK0_REG(n)			\
-ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);	\
-{;						\
-	mov r26=r2;				\
-	mov r2=r19;				\
-	bsw.1;					\
-	;;						\
-};						\
-{;						\
-	mov r##n##=r2;				\
-	nop.b 0x0;					\
-	bsw.0;					\
-	;;						\
-};						\
-{;						\
-	mov r2=r26;				\
-	mov b0=r30;				\
-	br.sptk.many b0;				\
-	;;						\
-};						\
-END(asm_mov_to_bank0_reg##n##)
-
-
-#define MOV_FROM_BANK0_REG(n)			\
-ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);	\
-{;						\
-	mov r26=r2;				\
-	nop.b 0x0;					\
-	bsw.1;					\
-	;;						\
-};						\
-{;						\
-	mov r2=r##n##;				\
-	nop.b 0x0;					\
-	bsw.0;					\
-	;;						\
-};						\
-{;						\
-	mov r19=r2;				\
-	mov r2=r26;				\
-	mov b0=r30;				\
-};						\
-{;						\
-	nop.b 0x0;					\
-	nop.b 0x0;					\
-	br.sptk.many b0;				\
-	;;						\
-};						\
-END(asm_mov_from_bank0_reg##n##)
-
-
-#define JMP_TO_MOV_TO_BANK0_REG(n)		\
-{;						\
-	nop.b 0x0;					\
-	nop.b 0x0;					\
-	br.sptk.many asm_mov_to_bank0_reg##n##;	\
-	;;						\
-}
-
-
-#define JMP_TO_MOV_FROM_BANK0_REG(n)		\
-{;						\
-	nop.b 0x0;					\
-	nop.b 0x0;					\
-	br.sptk.many asm_mov_from_bank0_reg##n##;	\
-	;;						\
-}
-
-
-MOV_FROM_BANK0_REG(16)
-MOV_FROM_BANK0_REG(17)
-MOV_FROM_BANK0_REG(18)
-MOV_FROM_BANK0_REG(19)
-MOV_FROM_BANK0_REG(20)
-MOV_FROM_BANK0_REG(21)
-MOV_FROM_BANK0_REG(22)
-MOV_FROM_BANK0_REG(23)
-MOV_FROM_BANK0_REG(24)
-MOV_FROM_BANK0_REG(25)
-MOV_FROM_BANK0_REG(26)
-MOV_FROM_BANK0_REG(27)
-MOV_FROM_BANK0_REG(28)
-MOV_FROM_BANK0_REG(29)
-MOV_FROM_BANK0_REG(30)
-MOV_FROM_BANK0_REG(31)
-
-
-// mov from reg table
-ENTRY(asm_mov_from_reg)
-	MOV_FROM_REG(0)
-	MOV_FROM_REG(1)
-	MOV_FROM_REG(2)
-	MOV_FROM_REG(3)
-	MOV_FROM_REG(4)
-	MOV_FROM_REG(5)
-	MOV_FROM_REG(6)
-	MOV_FROM_REG(7)
-	MOV_FROM_REG(8)
-	MOV_FROM_REG(9)
-	MOV_FROM_REG(10)
-	MOV_FROM_REG(11)
-	MOV_FROM_REG(12)
-	MOV_FROM_REG(13)
-	MOV_FROM_REG(14)
-	MOV_FROM_REG(15)
-	JMP_TO_MOV_FROM_BANK0_REG(16)
-	JMP_TO_MOV_FROM_BANK0_REG(17)
-	JMP_TO_MOV_FROM_BANK0_REG(18)
-	JMP_TO_MOV_FROM_BANK0_REG(19)
-	JMP_TO_MOV_FROM_BANK0_REG(20)
-	JMP_TO_MOV_FROM_BANK0_REG(21)
-	JMP_TO_MOV_FROM_BANK0_REG(22)
-	JMP_TO_MOV_FROM_BANK0_REG(23)
-	JMP_TO_MOV_FROM_BANK0_REG(24)
-	JMP_TO_MOV_FROM_BANK0_REG(25)
-	JMP_TO_MOV_FROM_BANK0_REG(26)
-	JMP_TO_MOV_FROM_BANK0_REG(27)
-	JMP_TO_MOV_FROM_BANK0_REG(28)
-	JMP_TO_MOV_FROM_BANK0_REG(29)
-	JMP_TO_MOV_FROM_BANK0_REG(30)
-	JMP_TO_MOV_FROM_BANK0_REG(31)
-	MOV_FROM_REG(32)
-	MOV_FROM_REG(33)
-	MOV_FROM_REG(34)
-	MOV_FROM_REG(35)
-	MOV_FROM_REG(36)
-	MOV_FROM_REG(37)
-	MOV_FROM_REG(38)
-	MOV_FROM_REG(39)
-	MOV_FROM_REG(40)
-	MOV_FROM_REG(41)
-	MOV_FROM_REG(42)
-	MOV_FROM_REG(43)
-	MOV_FROM_REG(44)
-	MOV_FROM_REG(45)
-	MOV_FROM_REG(46)
-	MOV_FROM_REG(47)
-	MOV_FROM_REG(48)
-	MOV_FROM_REG(49)
-	MOV_FROM_REG(50)
-	MOV_FROM_REG(51)
-	MOV_FROM_REG(52)
-	MOV_FROM_REG(53)
-	MOV_FROM_REG(54)
-	MOV_FROM_REG(55)
-	MOV_FROM_REG(56)
-	MOV_FROM_REG(57)
-	MOV_FROM_REG(58)
-	MOV_FROM_REG(59)
-	MOV_FROM_REG(60)
-	MOV_FROM_REG(61)
-	MOV_FROM_REG(62)
-	MOV_FROM_REG(63)
-	MOV_FROM_REG(64)
-	MOV_FROM_REG(65)
-	MOV_FROM_REG(66)
-	MOV_FROM_REG(67)
-	MOV_FROM_REG(68)
-	MOV_FROM_REG(69)
-	MOV_FROM_REG(70)
-	MOV_FROM_REG(71)
-	MOV_FROM_REG(72)
-	MOV_FROM_REG(73)
-	MOV_FROM_REG(74)
-	MOV_FROM_REG(75)
-	MOV_FROM_REG(76)
-	MOV_FROM_REG(77)
-	MOV_FROM_REG(78)
-	MOV_FROM_REG(79)
-	MOV_FROM_REG(80)
-	MOV_FROM_REG(81)
-	MOV_FROM_REG(82)
-	MOV_FROM_REG(83)
-	MOV_FROM_REG(84)
-	MOV_FROM_REG(85)
-	MOV_FROM_REG(86)
-	MOV_FROM_REG(87)
-	MOV_FROM_REG(88)
-	MOV_FROM_REG(89)
-	MOV_FROM_REG(90)
-	MOV_FROM_REG(91)
-	MOV_FROM_REG(92)
-	MOV_FROM_REG(93)
-	MOV_FROM_REG(94)
-	MOV_FROM_REG(95)
-	MOV_FROM_REG(96)
-	MOV_FROM_REG(97)
-	MOV_FROM_REG(98)
-	MOV_FROM_REG(99)
-	MOV_FROM_REG(100)
-	MOV_FROM_REG(101)
-	MOV_FROM_REG(102)
-	MOV_FROM_REG(103)
-	MOV_FROM_REG(104)
-	MOV_FROM_REG(105)
-	MOV_FROM_REG(106)
-	MOV_FROM_REG(107)
-	MOV_FROM_REG(108)
-	MOV_FROM_REG(109)
-	MOV_FROM_REG(110)
-	MOV_FROM_REG(111)
-	MOV_FROM_REG(112)
-	MOV_FROM_REG(113)
-	MOV_FROM_REG(114)
-	MOV_FROM_REG(115)
-	MOV_FROM_REG(116)
-	MOV_FROM_REG(117)
-	MOV_FROM_REG(118)
-	MOV_FROM_REG(119)
-	MOV_FROM_REG(120)
-	MOV_FROM_REG(121)
-	MOV_FROM_REG(122)
-	MOV_FROM_REG(123)
-	MOV_FROM_REG(124)
-	MOV_FROM_REG(125)
-	MOV_FROM_REG(126)
-	MOV_FROM_REG(127)
-END(asm_mov_from_reg)
-
-
-/* must be in bank 0
- * parameter:
- * r31: pr
- * r24: b0
- */
-ENTRY(kvm_resume_to_guest_with_sync)
-	adds r19=VMM_VPD_BASE_OFFSET,r21
-	mov r16 = r31
-	mov r17 = r24
-	;;
-{.mii
-	ld8 r25 =[r19]
-	nop 0x0
-	mov r24 = ip
-	;;
-}
-{.mmb
-	add r24 =0x20, r24
-	nop 0x0
-	br.sptk.many kvm_vps_sync_write
-}
-
-	mov r31 = r16
-	mov r24 =r17
-	;;
-	br.sptk.many kvm_resume_to_guest
-END(kvm_resume_to_guest_with_sync)
-
-ENTRY(kvm_resume_to_guest)
-	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
-	;;
-	ld8 r1 =[r16]
-	adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
-	;;
-	mov r16=cr.ipsr
-	;;
-	ld8 r20 = [r20]
-	adds r19=VMM_VPD_BASE_OFFSET,r21
-	;;
-	ld8 r25=[r19]
-	extr.u r17=r16,IA64_PSR_RI_BIT,2
-	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
-	;;
-	(p6) mov r18=cr.iip
-	(p6) mov r17=r0
-	;;
-	(p6) add r18=0x10,r18
-	(p7) add r17=1,r17
-	;;
-	(p6) mov cr.iip=r18
-	dep r16=r17,r16,IA64_PSR_RI_BIT,2
-	;;
-	mov cr.ipsr=r16
-	adds r19= VPD_VPSR_START_OFFSET,r25
-	add r28=PAL_VPS_RESUME_NORMAL,r20
-	add r29=PAL_VPS_RESUME_HANDLER,r20
-	;;
-	ld8 r19=[r19]
-	mov b0=r29
-	mov r27=cr.isr
-	;;
-	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT		// p7=vpsr.ic
-	shr r27=r27,IA64_ISR_IR_BIT
-	;;
-	(p6) ld8 r26=[r25]
-	(p7) mov b0=r28
-	;;
-	(p6) dep r26=r27,r26,63,1
-	mov pr=r31,-2
-	br.sptk.many b0             // call pal service
-	;;
-END(kvm_resume_to_guest)
-
-
-MOV_TO_BANK0_REG(16)
-MOV_TO_BANK0_REG(17)
-MOV_TO_BANK0_REG(18)
-MOV_TO_BANK0_REG(19)
-MOV_TO_BANK0_REG(20)
-MOV_TO_BANK0_REG(21)
-MOV_TO_BANK0_REG(22)
-MOV_TO_BANK0_REG(23)
-MOV_TO_BANK0_REG(24)
-MOV_TO_BANK0_REG(25)
-MOV_TO_BANK0_REG(26)
-MOV_TO_BANK0_REG(27)
-MOV_TO_BANK0_REG(28)
-MOV_TO_BANK0_REG(29)
-MOV_TO_BANK0_REG(30)
-MOV_TO_BANK0_REG(31)
-
-
-// mov to reg table
-ENTRY(asm_mov_to_reg)
-	MOV_TO_REG0
-	MOV_TO_REG(1)
-	MOV_TO_REG(2)
-	MOV_TO_REG(3)
-	MOV_TO_REG(4)
-	MOV_TO_REG(5)
-	MOV_TO_REG(6)
-	MOV_TO_REG(7)
-	MOV_TO_REG(8)
-	MOV_TO_REG(9)
-	MOV_TO_REG(10)
-	MOV_TO_REG(11)
-	MOV_TO_REG(12)
-	MOV_TO_REG(13)
-	MOV_TO_REG(14)
-	MOV_TO_REG(15)
-	JMP_TO_MOV_TO_BANK0_REG(16)
-	JMP_TO_MOV_TO_BANK0_REG(17)
-	JMP_TO_MOV_TO_BANK0_REG(18)
-	JMP_TO_MOV_TO_BANK0_REG(19)
-	JMP_TO_MOV_TO_BANK0_REG(20)
-	JMP_TO_MOV_TO_BANK0_REG(21)
-	JMP_TO_MOV_TO_BANK0_REG(22)
-	JMP_TO_MOV_TO_BANK0_REG(23)
-	JMP_TO_MOV_TO_BANK0_REG(24)
-	JMP_TO_MOV_TO_BANK0_REG(25)
-	JMP_TO_MOV_TO_BANK0_REG(26)
-	JMP_TO_MOV_TO_BANK0_REG(27)
-	JMP_TO_MOV_TO_BANK0_REG(28)
-	JMP_TO_MOV_TO_BANK0_REG(29)
-	JMP_TO_MOV_TO_BANK0_REG(30)
-	JMP_TO_MOV_TO_BANK0_REG(31)
-	MOV_TO_REG(32)
-	MOV_TO_REG(33)
-	MOV_TO_REG(34)
-	MOV_TO_REG(35)
-	MOV_TO_REG(36)
-	MOV_TO_REG(37)
-	MOV_TO_REG(38)
-	MOV_TO_REG(39)
-	MOV_TO_REG(40)
-	MOV_TO_REG(41)
-	MOV_TO_REG(42)
-	MOV_TO_REG(43)
-	MOV_TO_REG(44)
-	MOV_TO_REG(45)
-	MOV_TO_REG(46)
-	MOV_TO_REG(47)
-	MOV_TO_REG(48)
-	MOV_TO_REG(49)
-	MOV_TO_REG(50)
-	MOV_TO_REG(51)
-	MOV_TO_REG(52)
-	MOV_TO_REG(53)
-	MOV_TO_REG(54)
-	MOV_TO_REG(55)
-	MOV_TO_REG(56)
-	MOV_TO_REG(57)
-	MOV_TO_REG(58)
-	MOV_TO_REG(59)
-	MOV_TO_REG(60)
-	MOV_TO_REG(61)
-	MOV_TO_REG(62)
-	MOV_TO_REG(63)
-	MOV_TO_REG(64)
-	MOV_TO_REG(65)
-	MOV_TO_REG(66)
-	MOV_TO_REG(67)
-	MOV_TO_REG(68)
-	MOV_TO_REG(69)
-	MOV_TO_REG(70)
-	MOV_TO_REG(71)
-	MOV_TO_REG(72)
-	MOV_TO_REG(73)
-	MOV_TO_REG(74)
-	MOV_TO_REG(75)
-	MOV_TO_REG(76)
-	MOV_TO_REG(77)
-	MOV_TO_REG(78)
-	MOV_TO_REG(79)
-	MOV_TO_REG(80)
-	MOV_TO_REG(81)
-	MOV_TO_REG(82)
-	MOV_TO_REG(83)
-	MOV_TO_REG(84)
-	MOV_TO_REG(85)
-	MOV_TO_REG(86)
-	MOV_TO_REG(87)
-	MOV_TO_REG(88)
-	MOV_TO_REG(89)
-	MOV_TO_REG(90)
-	MOV_TO_REG(91)
-	MOV_TO_REG(92)
-	MOV_TO_REG(93)
-	MOV_TO_REG(94)
-	MOV_TO_REG(95)
-	MOV_TO_REG(96)
-	MOV_TO_REG(97)
-	MOV_TO_REG(98)
-	MOV_TO_REG(99)
-	MOV_TO_REG(100)
-	MOV_TO_REG(101)
-	MOV_TO_REG(102)
-	MOV_TO_REG(103)
-	MOV_TO_REG(104)
-	MOV_TO_REG(105)
-	MOV_TO_REG(106)
-	MOV_TO_REG(107)
-	MOV_TO_REG(108)
-	MOV_TO_REG(109)
-	MOV_TO_REG(110)
-	MOV_TO_REG(111)
-	MOV_TO_REG(112)
-	MOV_TO_REG(113)
-	MOV_TO_REG(114)
-	MOV_TO_REG(115)
-	MOV_TO_REG(116)
-	MOV_TO_REG(117)
-	MOV_TO_REG(118)
-	MOV_TO_REG(119)
-	MOV_TO_REG(120)
-	MOV_TO_REG(121)
-	MOV_TO_REG(122)
-	MOV_TO_REG(123)
-	MOV_TO_REG(124)
-	MOV_TO_REG(125)
-	MOV_TO_REG(126)
-	MOV_TO_REG(127)
-END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
deleted file mode 100644
index b039874..0000000
--- a/arch/ia64/kvm/process.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * process.c: handle interruption inject for guests.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- *  	Shaofan Li (Susue Li) <susie.li@intel.com>
- *  	Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
- *  	Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- *  	Xiantao Zhang (xiantao.zhang@intel.com)
- */
-#include "vcpu.h"
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-#include <asm/fpswa.h>
-#include <asm/kregs.h>
-#include <asm/tlb.h>
-
-fpswa_interface_t *vmm_fpswa_interface;
-
-#define IA64_VHPT_TRANS_VECTOR			0x0000
-#define IA64_INST_TLB_VECTOR			0x0400
-#define IA64_DATA_TLB_VECTOR			0x0800
-#define IA64_ALT_INST_TLB_VECTOR		0x0c00
-#define IA64_ALT_DATA_TLB_VECTOR		0x1000
-#define IA64_DATA_NESTED_TLB_VECTOR		0x1400
-#define IA64_INST_KEY_MISS_VECTOR		0x1800
-#define IA64_DATA_KEY_MISS_VECTOR		0x1c00
-#define IA64_DIRTY_BIT_VECTOR			0x2000
-#define IA64_INST_ACCESS_BIT_VECTOR		0x2400
-#define IA64_DATA_ACCESS_BIT_VECTOR		0x2800
-#define IA64_BREAK_VECTOR			0x2c00
-#define IA64_EXTINT_VECTOR			0x3000
-#define IA64_PAGE_NOT_PRESENT_VECTOR		0x5000
-#define IA64_KEY_PERMISSION_VECTOR		0x5100
-#define IA64_INST_ACCESS_RIGHTS_VECTOR		0x5200
-#define IA64_DATA_ACCESS_RIGHTS_VECTOR		0x5300
-#define IA64_GENEX_VECTOR			0x5400
-#define IA64_DISABLED_FPREG_VECTOR		0x5500
-#define IA64_NAT_CONSUMPTION_VECTOR		0x5600
-#define IA64_SPECULATION_VECTOR		0x5700 /* UNUSED */
-#define IA64_DEBUG_VECTOR			0x5900
-#define IA64_UNALIGNED_REF_VECTOR		0x5a00
-#define IA64_UNSUPPORTED_DATA_REF_VECTOR	0x5b00
-#define IA64_FP_FAULT_VECTOR			0x5c00
-#define IA64_FP_TRAP_VECTOR			0x5d00
-#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 	0x5e00
-#define IA64_TAKEN_BRANCH_TRAP_VECTOR		0x5f00
-#define IA64_SINGLE_STEP_TRAP_VECTOR		0x6000
-
-/* SDM vol2 5.5 - IVA based interruption handling */
-#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
-			IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT |    	\
-			IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT)
-
-#define DOMN_PAL_REQUEST    0x110000
-#define DOMN_SAL_REQUEST    0x110001
-
-static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
-	0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
-	0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
-	0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
-	0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
-	0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
-	0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
-	0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
-};
-
-static void collect_interruption(struct kvm_vcpu *vcpu)
-{
-	u64 ipsr;
-	u64 vdcr;
-	u64 vifs;
-	unsigned long vpsr;
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	vpsr = vcpu_get_psr(vcpu);
-	vcpu_bsw0(vcpu);
-	if (vpsr & IA64_PSR_IC) {
-
-		/* Sync mpsr id/da/dd/ss/ed bits to vipsr
-		 * since after guest do rfi, we still want these bits on in
-		 * mpsr
-		 */
-
-		ipsr = regs->cr_ipsr;
-		vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
-					| IA64_PSR_DD | IA64_PSR_SS
-					| IA64_PSR_ED));
-		vcpu_set_ipsr(vcpu, vpsr);
-
-		/* Currently, for trap, we do not advance IIP to next
-		 * instruction. That's because we assume caller already
-		 * set up IIP correctly
-		 */
-
-		vcpu_set_iip(vcpu , regs->cr_iip);
-
-		/* set vifs.v to zero */
-		vifs = VCPU(vcpu, ifs);
-		vifs &= ~IA64_IFS_V;
-		vcpu_set_ifs(vcpu, vifs);
-
-		vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
-	}
-
-	vdcr = VCPU(vcpu, dcr);
-
-	/* Set guest psr
-	 * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged
-	 * be: set to the value of dcr.be
-	 * pp: set to the value of dcr.pp
-	 */
-	vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
-	vpsr |= (vdcr & IA64_DCR_BE);
-
-	/* VDCR pp bit position is different from VPSR pp bit */
-	if (vdcr & IA64_DCR_PP) {
-		vpsr |= IA64_PSR_PP;
-	} else {
-		vpsr &= ~IA64_PSR_PP;
-	}
-
-	vcpu_set_psr(vcpu, vpsr);
-
-}
-
-void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
-{
-	u64 viva;
-	struct kvm_pt_regs *regs;
-	union ia64_isr pt_isr;
-
-	regs = vcpu_regs(vcpu);
-
-	/* clear cr.isr.ir (incomplete register frame)*/
-	pt_isr.val = VMX(vcpu, cr_isr);
-	pt_isr.ir = 0;
-	VMX(vcpu, cr_isr) = pt_isr.val;
-
-	collect_interruption(vcpu);
-
-	viva = vcpu_get_iva(vcpu);
-	regs->cr_iip = viva + vec;
-}
-
-static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-/*
- * Set vIFA & vITIR & vIHA, when vPSR.ic =1
- * Parameter:
- *  set_ifa: if true, set vIFA
- *  set_itir: if true, set vITIR
- *  set_iha: if true, set vIHA
- */
-void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
-		int set_ifa, int set_itir, int set_iha)
-{
-	long vpsr;
-	u64 value;
-
-	vpsr = VCPU(vcpu, vpsr);
-	/* Vol2, Table 8-1 */
-	if (vpsr & IA64_PSR_IC) {
-		if (set_ifa)
-			vcpu_set_ifa(vcpu, vadr);
-		if (set_itir) {
-			value = vcpu_get_itir_on_fault(vcpu, vadr);
-			vcpu_set_itir(vcpu, value);
-		}
-
-		if (set_iha) {
-			value = vcpu_thash(vcpu, vadr);
-			vcpu_set_iha(vcpu, value);
-		}
-	}
-}
-
-/*
- * Data TLB Fault
- *  @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	/* If vPSR.ic, IFA, ITIR, IHA */
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
-	inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
-}
-
-/*
- * Instruction TLB Fault
- *  @ Instruction TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	/* If vPSR.ic, IFA, ITIR, IHA */
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
-	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
-}
-
-/*
- * Data Nested TLB Fault
- *  @ Data Nested TLB Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void nested_dtlb(struct kvm_vcpu *vcpu)
-{
-	inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
-}
-
-/*
- * Alternate Data TLB Fault
- *  @ Alternate Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
-	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
-}
-
-/*
- * Data TLB Fault
- *  @ Data TLB vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
-	inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
-}
-
-/* Deal with:
- *  VHPT Translation Vector
- */
-static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	/* If vPSR.ic, IFA, ITIR, IHA*/
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
-	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-}
-
-/*
- * VHPT Instruction Fault
- *  @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	_vhpt_fault(vcpu, vadr);
-}
-
-/*
- * VHPT Data Fault
- *  @ VHPT Translation vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	_vhpt_fault(vcpu, vadr);
-}
-
-/*
- * Deal with:
- *  General Exception vector
- */
-void _general_exception(struct kvm_vcpu *vcpu)
-{
-	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
-}
-
-/*
- * Illegal Operation Fault
- *  @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void illegal_op(struct kvm_vcpu *vcpu)
-{
-	_general_exception(vcpu);
-}
-
-/*
- * Illegal Dependency Fault
- *  @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void illegal_dep(struct kvm_vcpu *vcpu)
-{
-	_general_exception(vcpu);
-}
-
-/*
- * Reserved Register/Field Fault
- *  @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void rsv_reg_field(struct kvm_vcpu *vcpu)
-{
-	_general_exception(vcpu);
-}
-/*
- * Privileged Operation Fault
- *  @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-
-void privilege_op(struct kvm_vcpu *vcpu)
-{
-	_general_exception(vcpu);
-}
-
-/*
- * Unimplement Data Address Fault
- *  @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void unimpl_daddr(struct kvm_vcpu *vcpu)
-{
-	_general_exception(vcpu);
-}
-
-/*
- * Privileged Register Fault
- *  @ General Exception Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void privilege_reg(struct kvm_vcpu *vcpu)
-{
-	_general_exception(vcpu);
-}
-
-/* Deal with
- *  Nat consumption vector
- * Parameter:
- *  vaddr: Optional, if t == REGISTER
- */
-static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
-						enum tlb_miss_type t)
-{
-	/* If vPSR.ic && t == DATA/INST, IFA */
-	if (t == DATA || t == INSTRUCTION) {
-		/* IFA */
-		set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
-	}
-
-	inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
-}
-
-/*
- * Instruction Nat Page Consumption Fault
- *  @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	_nat_consumption_fault(vcpu, vadr, INSTRUCTION);
-}
-
-/*
- * Register Nat Consumption Fault
- *  @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void rnat_consumption(struct kvm_vcpu *vcpu)
-{
-	_nat_consumption_fault(vcpu, 0, REGISTER);
-}
-
-/*
- * Data Nat Page Consumption Fault
- *  @ Nat Consumption Vector
- * Refer to SDM Vol2 Table 5-6 & 8-1
- */
-void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	_nat_consumption_fault(vcpu, vadr, DATA);
-}
-
-/* Deal with
- *  Page not present vector
- */
-static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	/* If vPSR.ic, IFA, ITIR */
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
-	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
-}
-
-void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	__page_not_present(vcpu, vadr);
-}
-
-void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	__page_not_present(vcpu, vadr);
-}
-
-/* Deal with
- *  Data access rights vector
- */
-void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	/* If vPSR.ic, IFA, ITIR */
-	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
-	inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
-}
-
-fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
-		unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
-		unsigned long *ifs, struct kvm_pt_regs *regs)
-{
-	fp_state_t fp_state;
-	fpswa_ret_t ret;
-	struct kvm_vcpu *vcpu = current_vcpu;
-
-	uint64_t old_rr7 = ia64_get_rr(7UL<<61);
-
-	if (!vmm_fpswa_interface)
-		return (fpswa_ret_t) {-1, 0, 0, 0};
-
-	memset(&fp_state, 0, sizeof(fp_state_t));
-
-	/*
-	 * compute fp_state.  only FP registers f6 - f11 are used by the
-	 * vmm, so set those bits in the mask and set the low volatile
-	 * pointer to point to these registers.
-	 */
-	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */
-
-	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
-
-   /*
-	 * unsigned long (*EFI_FPSWA) (
-	 *      unsigned long    trap_type,
-	 *      void             *Bundle,
-	 *      unsigned long    *pipsr,
-	 *      unsigned long    *pfsr,
-	 *      unsigned long    *pisr,
-	 *      unsigned long    *ppreds,
-	 *      unsigned long    *pifs,
-	 *      void             *fp_state);
-	 */
-	/*Call host fpswa interface directly to virtualize
-	 *guest fpswa request!
-	 */
-	ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
-	ia64_srlz_d();
-
-	ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
-			ipsr, fpsr, isr, pr, ifs, &fp_state);
-	ia64_set_rr(7UL << 61, old_rr7);
-	ia64_srlz_d();
-	return ret;
-}
-
-/*
- * Handle floating-point assist faults and traps for domain.
- */
-unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
-					unsigned long isr)
-{
-	struct kvm_vcpu *v = current_vcpu;
-	IA64_BUNDLE bundle;
-	unsigned long fault_ip;
-	fpswa_ret_t ret;
-
-	fault_ip = regs->cr_iip;
-	/*
-	 * When the FP trap occurs, the trapping instruction is completed.
-	 * If ipsr.ri == 0, there is the trapping instruction in previous
-	 * bundle.
-	 */
-	if (!fp_fault && (ia64_psr(regs)->ri == 0))
-		fault_ip -= 16;
-
-	if (fetch_code(v, fault_ip, &bundle))
-		return -EAGAIN;
-
-	if (!bundle.i64[0] && !bundle.i64[1])
-		return -EACCES;
-
-	ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
-			&isr, &regs->pr, &regs->cr_ifs, regs);
-	return ret.status;
-}
-
-void reflect_interruption(u64 ifa, u64 isr, u64 iim,
-		u64 vec, struct kvm_pt_regs *regs)
-{
-	u64 vector;
-	int status ;
-	struct kvm_vcpu *vcpu = current_vcpu;
-	u64 vpsr = VCPU(vcpu, vpsr);
-
-	vector = vec2off[vec];
-
-	if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
-		panic_vm(vcpu, "Interruption with vector :0x%lx occurs "
-						"with psr.ic = 0\n", vector);
-		return;
-	}
-
-	switch (vec) {
-	case 32: 	/*IA64_FP_FAULT_VECTOR*/
-		status = vmm_handle_fpu_swa(1, regs, isr);
-		if (!status) {
-			vcpu_increment_iip(vcpu);
-			return;
-		} else if (-EAGAIN == status)
-			return;
-		break;
-	case 33:	/*IA64_FP_TRAP_VECTOR*/
-		status = vmm_handle_fpu_swa(0, regs, isr);
-		if (!status)
-			return ;
-		break;
-	}
-
-	VCPU(vcpu, isr) = isr;
-	VCPU(vcpu, iipa) = regs->cr_iip;
-	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
-		VCPU(vcpu, iim) = iim;
-	else
-		set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
-
-	inject_guest_interruption(vcpu, vector);
-}
-
-static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
-						unsigned long arg)
-{
-	struct thash_data *data;
-	unsigned long gpa, poff;
-
-	if (!is_physical_mode(vcpu)) {
-		/* Depends on caller to provide the DTR or DTC mapping.*/
-		data = vtlb_lookup(vcpu, arg, D_TLB);
-		if (data)
-			gpa = data->page_flags & _PAGE_PPN_MASK;
-		else {
-			data = vhpt_lookup(arg);
-			if (!data)
-				return 0;
-			gpa = data->gpaddr & _PAGE_PPN_MASK;
-		}
-
-		poff = arg & (PSIZE(data->ps) - 1);
-		arg = PAGEALIGN(gpa, data->ps) | poff;
-	}
-	arg = kvm_gpa_to_mpa(arg << 1 >> 1);
-
-	return (unsigned long)__va(arg);
-}
-
-static void set_pal_call_data(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-	unsigned long gr28 = vcpu_get_gr(vcpu, 28);
-	unsigned long gr29 = vcpu_get_gr(vcpu, 29);
-	unsigned long gr30 = vcpu_get_gr(vcpu, 30);
-
-	/*FIXME:For static and stacked convention, firmware
-	 * has put the parameters in gr28-gr31 before
-	 * break to vmm  !!*/
-
-	switch (gr28) {
-	case PAL_PERF_MON_INFO:
-	case PAL_HALT_INFO:
-		p->u.pal_data.gr29 =  kvm_trans_pal_call_args(vcpu, gr29);
-		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
-		break;
-	case PAL_BRAND_INFO:
-		p->u.pal_data.gr29 = gr29;
-		p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
-		break;
-	default:
-		p->u.pal_data.gr29 = gr29;
-		p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
-	}
-	p->u.pal_data.gr28 = gr28;
-	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
-
-	p->exit_reason = EXIT_REASON_PAL_CALL;
-}
-
-static void get_pal_call_result(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
-	if (p->exit_reason == EXIT_REASON_PAL_CALL) {
-		vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
-		vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
-		vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
-		vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
-	} else
-		panic_vm(vcpu, "Mis-set for exit reason!\n");
-}
-
-static void set_sal_call_data(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
-	p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
-	p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
-	p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
-	p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
-	p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
-	p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
-	p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
-	p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
-	p->exit_reason = EXIT_REASON_SAL_CALL;
-}
-
-static void get_sal_call_result(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
-	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
-		vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
-		vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
-		vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
-		vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
-	} else
-		panic_vm(vcpu, "Mis-set for exit reason!\n");
-}
-
-void  kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
-		unsigned long isr, unsigned long iim)
-{
-	struct kvm_vcpu *v = current_vcpu;
-	long psr;
-
-	if (ia64_psr(regs)->cpl == 0) {
-		/* Allow hypercalls only when cpl = 0.  */
-		if (iim == DOMN_PAL_REQUEST) {
-			local_irq_save(psr);
-			set_pal_call_data(v);
-			vmm_transition(v);
-			get_pal_call_result(v);
-			vcpu_increment_iip(v);
-			local_irq_restore(psr);
-			return;
-		} else if (iim == DOMN_SAL_REQUEST) {
-			local_irq_save(psr);
-			set_sal_call_data(v);
-			vmm_transition(v);
-			get_sal_call_result(v);
-			vcpu_increment_iip(v);
-			local_irq_restore(psr);
-			return;
-		}
-	}
-	reflect_interruption(ifa, isr, iim, 11, regs);
-}
-
-void check_pending_irq(struct kvm_vcpu *vcpu)
-{
-	int  mask, h_pending, h_inservice;
-	u64 isr;
-	unsigned long  vpsr;
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	h_pending = highest_pending_irq(vcpu);
-	if (h_pending == NULL_VECTOR) {
-		update_vhpi(vcpu, NULL_VECTOR);
-		return;
-	}
-	h_inservice = highest_inservice_irq(vcpu);
-
-	vpsr = VCPU(vcpu, vpsr);
-	mask = irq_masked(vcpu, h_pending, h_inservice);
-	if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
-		isr = vpsr & IA64_PSR_RI;
-		update_vhpi(vcpu, h_pending);
-		reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
-	} else if (mask == IRQ_MASKED_BY_INSVC) {
-		if (VCPU(vcpu, vhpi))
-			update_vhpi(vcpu, NULL_VECTOR);
-	} else {
-		/* masked by vpsr.i or vtpr.*/
-		update_vhpi(vcpu, h_pending);
-	}
-}
-
-static void generate_exirq(struct kvm_vcpu *vcpu)
-{
-	unsigned  vpsr;
-	uint64_t isr;
-
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	vpsr = VCPU(vcpu, vpsr);
-	isr = vpsr & IA64_PSR_RI;
-	if (!(vpsr & IA64_PSR_IC))
-		panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n");
-	reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
-}
-
-void vhpi_detection(struct kvm_vcpu *vcpu)
-{
-	uint64_t    threshold, vhpi;
-	union ia64_tpr       vtpr;
-	struct ia64_psr vpsr;
-
-	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-	vtpr.val = VCPU(vcpu, tpr);
-
-	threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
-	vhpi = VCPU(vcpu, vhpi);
-	if (vhpi > threshold) {
-		/* interrupt actived*/
-		generate_exirq(vcpu);
-	}
-}
-
-void leave_hypervisor_tail(void)
-{
-	struct kvm_vcpu *v = current_vcpu;
-
-	if (VMX(v, timer_check)) {
-		VMX(v, timer_check) = 0;
-		if (VMX(v, itc_check)) {
-			if (vcpu_get_itc(v) > VCPU(v, itm)) {
-				if (!(VCPU(v, itv) & (1 << 16))) {
-					vcpu_pend_interrupt(v, VCPU(v, itv)
-							& 0xff);
-					VMX(v, itc_check) = 0;
-				} else {
-					v->arch.timer_pending = 1;
-				}
-				VMX(v, last_itc) = VCPU(v, itm) + 1;
-			}
-		}
-	}
-
-	rmb();
-	if (v->arch.irq_new_pending) {
-		v->arch.irq_new_pending = 0;
-		VMX(v, irq_check) = 0;
-		check_pending_irq(v);
-		return;
-	}
-	if (VMX(v, irq_check)) {
-		VMX(v, irq_check) = 0;
-		vhpi_detection(v);
-	}
-}
-
-static inline void handle_lds(struct kvm_pt_regs *regs)
-{
-	regs->cr_ipsr |= IA64_PSR_ED;
-}
-
-void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
-{
-	unsigned long pte;
-	union ia64_rr rr;
-
-	rr.val = ia64_get_rr(vadr);
-	pte =  vadr & _PAGE_PPN_MASK;
-	pte = pte | PHY_PAGE_WB;
-	thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
-	return;
-}
-
-void kvm_page_fault(u64 vadr , u64 vec, struct kvm_pt_regs *regs)
-{
-	unsigned long vpsr;
-	int type;
-
-	u64 vhpt_adr, gppa, pteval, rr, itir;
-	union ia64_isr misr;
-	union ia64_pta vpta;
-	struct thash_data *data;
-	struct kvm_vcpu *v = current_vcpu;
-
-	vpsr = VCPU(v, vpsr);
-	misr.val = VMX(v, cr_isr);
-
-	type = vec;
-
-	if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
-		if (vec == 2) {
-			if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
-				emulate_io_inst(v, ((vadr << 1) >> 1), 4);
-				return;
-			}
-		}
-		physical_tlb_miss(v, vadr, type);
-		return;
-	}
-	data = vtlb_lookup(v, vadr, type);
-	if (data != 0) {
-		if (type == D_TLB) {
-			gppa = (vadr & ((1UL << data->ps) - 1))
-				+ (data->ppn >> (data->ps - 12) << data->ps);
-			if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
-				if (data->pl >= ((regs->cr_ipsr >>
-						IA64_PSR_CPL0_BIT) & 3))
-					emulate_io_inst(v, gppa, data->ma);
-				else {
-					vcpu_set_isr(v, misr.val);
-					data_access_rights(v, vadr);
-				}
-				return ;
-			}
-		}
-		thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
-
-	} else if (type == D_TLB) {
-		if (misr.sp) {
-			handle_lds(regs);
-			return;
-		}
-
-		rr = vcpu_get_rr(v, vadr);
-		itir = rr & (RR_RID_MASK | RR_PS_MASK);
-
-		if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
-			if (vpsr & IA64_PSR_IC) {
-				vcpu_set_isr(v, misr.val);
-				alt_dtlb(v, vadr);
-			} else {
-				nested_dtlb(v);
-			}
-			return ;
-		}
-
-		vpta.val = vcpu_get_pta(v);
-		/* avoid recursively walking (short format) VHPT */
-
-		vhpt_adr = vcpu_thash(v, vadr);
-		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
-			/* VHPT successfully read.  */
-			if (!(pteval & _PAGE_P)) {
-				if (vpsr & IA64_PSR_IC) {
-					vcpu_set_isr(v, misr.val);
-					dtlb_fault(v, vadr);
-				} else {
-					nested_dtlb(v);
-				}
-			} else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
-				thash_purge_and_insert(v, pteval, itir,
-								vadr, D_TLB);
-			} else if (vpsr & IA64_PSR_IC) {
-				vcpu_set_isr(v, misr.val);
-				dtlb_fault(v, vadr);
-			} else {
-				nested_dtlb(v);
-			}
-		} else {
-			/* Can't read VHPT.  */
-			if (vpsr & IA64_PSR_IC) {
-				vcpu_set_isr(v, misr.val);
-				dvhpt_fault(v, vadr);
-			} else {
-				nested_dtlb(v);
-			}
-		}
-	} else if (type == I_TLB) {
-		if (!(vpsr & IA64_PSR_IC))
-			misr.ni = 1;
-		if (!vhpt_enabled(v, vadr, INST_REF)) {
-			vcpu_set_isr(v, misr.val);
-			alt_itlb(v, vadr);
-			return;
-		}
-
-		vpta.val = vcpu_get_pta(v);
-
-		vhpt_adr = vcpu_thash(v, vadr);
-		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
-			/* VHPT successfully read.  */
-			if (pteval & _PAGE_P) {
-				if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
-					vcpu_set_isr(v, misr.val);
-					itlb_fault(v, vadr);
-					return ;
-				}
-				rr = vcpu_get_rr(v, vadr);
-				itir = rr & (RR_RID_MASK | RR_PS_MASK);
-				thash_purge_and_insert(v, pteval, itir,
-							vadr, I_TLB);
-			} else {
-				vcpu_set_isr(v, misr.val);
-				inst_page_not_present(v, vadr);
-			}
-		} else {
-			vcpu_set_isr(v, misr.val);
-			ivhpt_fault(v, vadr);
-		}
-	}
-}
-
-void kvm_vexirq(struct kvm_vcpu *vcpu)
-{
-	u64 vpsr, isr;
-	struct kvm_pt_regs *regs;
-
-	regs = vcpu_regs(vcpu);
-	vpsr = VCPU(vcpu, vpsr);
-	isr = vpsr & IA64_PSR_RI;
-	reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/
-}
-
-void kvm_ia64_handle_irq(struct kvm_vcpu *v)
-{
-	struct exit_ctl_data *p = &v->arch.exit_data;
-	long psr;
-
-	local_irq_save(psr);
-	p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
-	vmm_transition(v);
-	local_irq_restore(psr);
-
-	VMX(v, timer_check) = 1;
-
-}
-
-static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
-{
-	u64 oldrid, moldrid, oldpsbits, vaddr;
-	struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
-	vaddr = p->vaddr;
-
-	oldrid = VMX(v, vrr[0]);
-	VMX(v, vrr[0]) = p->rr;
-	oldpsbits = VMX(v, psbits[0]);
-	VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
-	moldrid = ia64_get_rr(0x0);
-	ia64_set_rr(0x0, vrrtomrr(p->rr));
-	ia64_srlz_d();
-
-	vaddr = PAGEALIGN(vaddr, p->ps);
-	thash_purge_entries_remote(v, vaddr, p->ps);
-
-	VMX(v, vrr[0]) = oldrid;
-	VMX(v, psbits[0]) = oldpsbits;
-	ia64_set_rr(0x0, moldrid);
-	ia64_dv_serialize_data();
-}
-
-static void vcpu_do_resume(struct kvm_vcpu *vcpu)
-{
-	/*Re-init VHPT and VTLB once from resume*/
-	vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
-	thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
-	vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
-	thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
-
-	ia64_set_pta(vcpu->arch.vhpt.pta.val);
-}
-
-static void vmm_sanity_check(struct kvm_vcpu *vcpu)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-
-	if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
-		panic_vm(vcpu, "Failed to do vmm sanity check,"
-			"it maybe caused by crashed vmm!!\n\n");
-	}
-}
-
-static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
-{
-	vmm_sanity_check(vcpu); /*Guarantee vcpu running on healthy vmm!*/
-
-	if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
-		vcpu_do_resume(vcpu);
-		return;
-	}
-
-	if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
-		thash_purge_all(vcpu);
-		return;
-	}
-
-	if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
-		while (vcpu->arch.ptc_g_count > 0)
-			ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
-	}
-}
-
-void vmm_transition(struct kvm_vcpu *vcpu)
-{
-	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
-			1, 0, 0, 0, 0, 0);
-	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
-	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
-						1, 0, 0, 0, 0, 0);
-	kvm_do_resume_op(vcpu);
-}
-
-void vmm_panic_handler(u64 vec)
-{
-	struct kvm_vcpu *vcpu = current_vcpu;
-	vmm_sanity = 0;
-	panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
-			vec2off[vec]);
-}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
deleted file mode 100644
index 30897d4..0000000
--- a/arch/ia64/kvm/trampoline.S
+++ /dev/null
@@ -1,1038 +0,0 @@
-/* Save all processor states
- *
- * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
- * Copyright (c) 2007 Anthony Xu   <anthony.xu@intel.com>
- */
-
-#include <asm/asmmacro.h>
-#include "asm-offsets.h"
-
-
-#define CTX(name)    VMM_CTX_##name##_OFFSET
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_BRANCH_REGS			\
-	add	r2 = CTX(B0),r32;		\
-	add	r3 = CTX(B1),r32;		\
-	mov	r16 = b0;			\
-	mov	r17 = b1;			\
-	;;					\
-	st8	[r2]=r16,16;			\
-	st8	[r3]=r17,16;			\
-	;;					\
-	mov	r16 = b2;			\
-	mov	r17 = b3;			\
-	;;					\
-	st8	[r2]=r16,16;			\
-	st8	[r3]=r17,16;			\
-	;;					\
-	mov	r16 = b4;			\
-	mov	r17 = b5;			\
-	;;					\
-	st8	[r2]=r16;   			\
-	st8	[r3]=r17;   			\
-	;;
-
-	/*
-	 *	r33:		context_t base address
-	 */
-#define	RESTORE_BRANCH_REGS			\
-	add	r2 = CTX(B0),r33;		\
-	add	r3 = CTX(B1),r33;		\
-	;;					\
-	ld8	r16=[r2],16;			\
-	ld8	r17=[r3],16;			\
-	;;					\
-	mov	b0 = r16;			\
-	mov	b1 = r17;			\
-	;;					\
-	ld8	r16=[r2],16;			\
-	ld8	r17=[r3],16;			\
-	;;					\
-	mov	b2 = r16;			\
-	mov	b3 = r17;			\
-	;;					\
-	ld8	r16=[r2];   			\
-	ld8	r17=[r3];   			\
-	;;					\
-	mov	b4=r16;				\
-	mov	b5=r17;				\
-	;;
-
-
-	/*
-	 *	r32: context_t base address
-	 *	bsw == 1
-	 *	Save all bank1 general registers, r4 ~ r7
-	 */
-#define	SAVE_GENERAL_REGS			\
-	add	r2=CTX(R4),r32;			\
-	add	r3=CTX(R5),r32;			\
-	;;					\
-.mem.offset 0,0;        			\
-	st8.spill	[r2]=r4,16;		\
-.mem.offset 8,0;        			\
-	st8.spill	[r3]=r5,16;		\
-	;;					\
-.mem.offset 0,0;        			\
-	st8.spill	[r2]=r6,48;		\
-.mem.offset 8,0;        			\
-	st8.spill	[r3]=r7,48;		\
-	;;                          		\
-.mem.offset 0,0;        			\
-    st8.spill    [r2]=r12;			\
-.mem.offset 8,0;				\
-    st8.spill    [r3]=r13;			\
-    ;;
-
-	/*
-	 *	r33: context_t base address
-	 *	bsw == 1
-	 */
-#define	RESTORE_GENERAL_REGS			\
-	add	r2=CTX(R4),r33;			\
-	add	r3=CTX(R5),r33;			\
-	;;					\
-	ld8.fill	r4=[r2],16;		\
-	ld8.fill	r5=[r3],16;		\
-	;;					\
-	ld8.fill	r6=[r2],48;		\
-	ld8.fill	r7=[r3],48;		\
-	;;					\
-	ld8.fill    r12=[r2];			\
-	ld8.fill    r13 =[r3];			\
-	;;
-
-
-
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_KERNEL_REGS			\
-	add	r2 = CTX(KR0),r32;		\
-	add	r3 = CTX(KR1),r32;		\
-	mov	r16 = ar.k0;			\
-	mov	r17 = ar.k1;			\
-	;;		        		\
-	st8	[r2] = r16,16;			\
-	st8	[r3] = r17,16;			\
-	;;		        		\
-	mov	r16 = ar.k2;			\
-	mov	r17 = ar.k3;			\
-	;;		        		\
-	st8	[r2] = r16,16;			\
-	st8	[r3] = r17,16;			\
-	;;					\
-	mov	r16 = ar.k4;			\
-	mov	r17 = ar.k5;			\
-	;;				    	\
-	st8	[r2] = r16,16;			\
-	st8	[r3] = r17,16;			\
-	;;					\
-	mov	r16 = ar.k6;			\
-	mov	r17 = ar.k7;			\
-	;;		    			\
-	st8	[r2] = r16;     		\
-	st8	[r3] = r17;			\
-	;;
-
-
-
-	/*
-	 *	r33:		context_t base address
-	 */
-#define	RESTORE_KERNEL_REGS			\
-	add	r2 = CTX(KR0),r33;		\
-	add	r3 = CTX(KR1),r33;		\
-	;;		    			\
-	ld8	r16=[r2],16;     		\
-	ld8	r17=[r3],16;			\
-	;;					\
-	mov	ar.k0=r16;  			\
-	mov	ar.k1=r17;	    		\
-	;;		        		\
-	ld8	r16=[r2],16;			\
-	ld8	r17=[r3],16;			\
-	;;		        		\
-	mov	ar.k2=r16;   			\
-	mov	ar.k3=r17;	    		\
-	;;		        		\
-	ld8	r16=[r2],16;			\
-	ld8	r17=[r3],16;			\
-	;;					\
-	mov	ar.k4=r16;			\
-	mov	ar.k5=r17;	    		\
-	;;				    	\
-	ld8	r16=[r2],16;			\
-	ld8	r17=[r3],16;			\
-	;;					\
-	mov	ar.k6=r16;  			\
-	mov	ar.k7=r17;	    		\
-	;;
-
-
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_APP_REGS				\
-	add  r2 = CTX(BSPSTORE),r32;		\
-	mov  r16 = ar.bspstore;			\
-	;;					\
-	st8  [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
-	mov  r16 = ar.rnat;			\
-	;;					\
-	st8  [r2] = r16,CTX(FCR)-CTX(RNAT);	\
-	mov  r16 = ar.fcr;			\
-	;;					\
-	st8  [r2] = r16,CTX(EFLAG)-CTX(FCR);	\
-	mov  r16 = ar.eflag;			\
-	;;					\
-	st8  [r2] = r16,CTX(CFLG)-CTX(EFLAG);	\
-	mov  r16 = ar.cflg;			\
-	;;					\
-	st8  [r2] = r16,CTX(FSR)-CTX(CFLG);	\
-	mov  r16 = ar.fsr;			\
-	;;					\
-	st8  [r2] = r16,CTX(FIR)-CTX(FSR);	\
-	mov  r16 = ar.fir;			\
-	;;					\
-	st8  [r2] = r16,CTX(FDR)-CTX(FIR);	\
-	mov  r16 = ar.fdr;			\
-	;;					\
-	st8  [r2] = r16,CTX(UNAT)-CTX(FDR);	\
-	mov  r16 = ar.unat;			\
-	;;					\
-	st8  [r2] = r16,CTX(FPSR)-CTX(UNAT);	\
-	mov  r16 = ar.fpsr;			\
-	;;					\
-	st8  [r2] = r16,CTX(PFS)-CTX(FPSR);	\
-	mov  r16 = ar.pfs;			\
-	;;					\
-	st8  [r2] = r16,CTX(LC)-CTX(PFS);	\
-	mov  r16 = ar.lc;			\
-	;;					\
-	st8  [r2] = r16;			\
-	;;
-
-	/*
-	 *	r33:		context_t base address
-	 */
-#define	RESTORE_APP_REGS			\
-	add  r2=CTX(BSPSTORE),r33;		\
-	;;					\
-	ld8  r16=[r2],CTX(RNAT)-CTX(BSPSTORE);	\
-	;;					\
-	mov  ar.bspstore=r16;			\
-	ld8  r16=[r2],CTX(FCR)-CTX(RNAT);	\
-	;;					\
-	mov  ar.rnat=r16;			\
-	ld8  r16=[r2],CTX(EFLAG)-CTX(FCR);	\
-	;;					\
-	mov  ar.fcr=r16;			\
-	ld8  r16=[r2],CTX(CFLG)-CTX(EFLAG);	\
-	;;					\
-	mov  ar.eflag=r16;			\
-	ld8  r16=[r2],CTX(FSR)-CTX(CFLG);	\
-	;;					\
-	mov  ar.cflg=r16;			\
-	ld8  r16=[r2],CTX(FIR)-CTX(FSR);	\
-	;;					\
-	mov  ar.fsr=r16;			\
-	ld8  r16=[r2],CTX(FDR)-CTX(FIR);	\
-	;;					\
-	mov  ar.fir=r16;			\
-	ld8  r16=[r2],CTX(UNAT)-CTX(FDR);	\
-	;;					\
-	mov  ar.fdr=r16;			\
-	ld8  r16=[r2],CTX(FPSR)-CTX(UNAT);	\
-	;;					\
-	mov  ar.unat=r16;			\
-	ld8  r16=[r2],CTX(PFS)-CTX(FPSR);	\
-	;;					\
-	mov  ar.fpsr=r16;			\
-	ld8  r16=[r2],CTX(LC)-CTX(PFS);		\
-	;;					\
-	mov  ar.pfs=r16;			\
-	ld8  r16=[r2];				\
-	;;					\
-	mov  ar.lc=r16;				\
-	;;
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_CTL_REGS				\
-	add	r2 = CTX(DCR),r32;		\
-	mov	r16 = cr.dcr;			\
-	;;					\
-	st8	[r2] = r16,CTX(IVA)-CTX(DCR);	\
-	;;                          		\
-	mov	r16 = cr.iva;			\
-	;;					\
-	st8	[r2] = r16,CTX(PTA)-CTX(IVA);	\
-	;;					\
-	mov r16 = cr.pta;			\
-	;;					\
-	st8 [r2] = r16 ;			\
-	;;
-
-	/*
-	 *	r33:		context_t base address
-	 */
-#define	RESTORE_CTL_REGS				\
-	add	r2 = CTX(DCR),r33;	        	\
-	;;						\
-	ld8	r16 = [r2],CTX(IVA)-CTX(DCR);		\
-	;;                      			\
-	mov	cr.dcr = r16;				\
-	dv_serialize_data;				\
-	;;						\
-	ld8	r16 = [r2],CTX(PTA)-CTX(IVA);		\
-	;;						\
-	mov	cr.iva = r16;				\
-	dv_serialize_data;				\
-	;;						\
-	ld8 r16 = [r2];					\
-	;;						\
-	mov cr.pta = r16;				\
-	dv_serialize_data;				\
-	;;
-
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_REGION_REGS			\
-	add	r2=CTX(RR0),r32;		\
-	mov	r16=rr[r0];			\
-	dep.z	r18=1,61,3;			\
-	;;					\
-	st8	[r2]=r16,8;			\
-	mov	r17=rr[r18];			\
-	dep.z	r18=2,61,3;			\
-	;;					\
-	st8	[r2]=r17,8;			\
-	mov	r16=rr[r18];			\
-	dep.z	r18=3,61,3;			\
-	;;					\
-	st8	[r2]=r16,8;			\
-	mov	r17=rr[r18];			\
-	dep.z	r18=4,61,3;			\
-	;;					\
-	st8	[r2]=r17,8;			\
-	mov	r16=rr[r18];			\
-	dep.z	r18=5,61,3;			\
-	;;					\
-	st8	[r2]=r16,8;			\
-	mov	r17=rr[r18];			\
-	dep.z	r18=7,61,3;			\
-	;;					\
-	st8	[r2]=r17,16;			\
-	mov	r16=rr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;			\
-	;;
-
-	/*
-	 *	r33:context_t base address
-	 */
-#define	RESTORE_REGION_REGS	\
-	add	r2=CTX(RR0),r33;\
-	mov r18=r0;		\
-	;;			\
-	ld8	r20=[r2],8;	\
-	;;	/* rr0 */	\
-	ld8	r21=[r2],8;	\
-	;;	/* rr1 */	\
-	ld8	r22=[r2],8;	\
-	;;	/* rr2 */	\
-	ld8	r23=[r2],8;	\
-	;;	/* rr3 */	\
-	ld8	r24=[r2],8;	\
-	;;	/* rr4 */	\
-	ld8	r25=[r2],16;	\
-	;;	/* rr5 */	\
-	ld8	r27=[r2];	\
-	;;	/* rr7 */	\
-	mov rr[r18]=r20;	\
-	dep.z	r18=1,61,3;	\
-	;;  /* rr1 */		\
-	mov rr[r18]=r21;	\
-	dep.z	r18=2,61,3;	\
-	;;  /* rr2 */		\
-	mov rr[r18]=r22;	\
-	dep.z	r18=3,61,3;	\
-	;;  /* rr3 */		\
-	mov rr[r18]=r23;	\
-	dep.z	r18=4,61,3;	\
-	;;  /* rr4 */		\
-	mov rr[r18]=r24;	\
-	dep.z	r18=5,61,3;	\
-	;;  /* rr5 */		\
-	mov rr[r18]=r25;	\
-	dep.z	r18=7,61,3;	\
-	;;  /* rr7 */		\
-	mov rr[r18]=r27;	\
-	;;			\
-	srlz.i;			\
-	;;
-
-
-
-	/*
-	 *	r32:	context_t base address
-	 *	r36~r39:scratch registers
-	 */
-#define	SAVE_DEBUG_REGS				\
-	add	r2=CTX(IBR0),r32;		\
-	add	r3=CTX(DBR0),r32;		\
-	mov	r16=ibr[r0];			\
-	mov	r17=dbr[r0];			\
-	;;					\
-	st8	[r2]=r16,8; 			\
-	st8	[r3]=r17,8;	    		\
-	add	r18=1,r0;		    	\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=2,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=2,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=3,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=4,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=5,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=6,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	add	r18=7,r0;			\
-	;;					\
-	mov	r16=ibr[r18];			\
-	mov	r17=dbr[r18];			\
-	;;					\
-	st8	[r2]=r16,8;		    	\
-	st8	[r3]=r17,8;			\
-	;;
-
-
-/*
- *      r33:    point to context_t structure
- *      ar.lc are corrupted.
- */
-#define RESTORE_DEBUG_REGS			\
-	add	r2=CTX(IBR0),r33;		\
-	add	r3=CTX(DBR0),r33;		\
-	mov r16=7;    				\
-	mov r17=r0;				\
-	;;                    			\
-	mov ar.lc = r16;			\
-	;; 					\
-1:						\
-	ld8 r18=[r2],8;		    		\
-	ld8 r19=[r3],8;				\
-	;;					\
-	mov ibr[r17]=r18;			\
-	mov dbr[r17]=r19;			\
-	;;   					\
-	srlz.i;					\
-	;; 					\
-	add r17=1,r17;				\
-	br.cloop.sptk 1b;			\
-	;;
-
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_FPU_LOW				\
-	add	r2=CTX(F2),r32;			\
-	add	r3=CTX(F3),r32;			\
-	;;					\
-	stf.spill.nta	[r2]=f2,32;		\
-	stf.spill.nta	[r3]=f3,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f4,32;		\
-	stf.spill.nta	[r3]=f5,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f6,32;		\
-	stf.spill.nta	[r3]=f7,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f8,32;		\
-	stf.spill.nta	[r3]=f9,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f10,32;		\
-	stf.spill.nta	[r3]=f11,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f12,32;		\
-	stf.spill.nta	[r3]=f13,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f14,32;		\
-	stf.spill.nta	[r3]=f15,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f16,32;		\
-	stf.spill.nta	[r3]=f17,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f18,32;		\
-	stf.spill.nta	[r3]=f19,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f20,32;		\
-	stf.spill.nta	[r3]=f21,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f22,32;		\
-	stf.spill.nta	[r3]=f23,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f24,32;		\
-	stf.spill.nta	[r3]=f25,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f26,32;		\
-	stf.spill.nta	[r3]=f27,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f28,32;		\
-	stf.spill.nta	[r3]=f29,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f30;		\
-	stf.spill.nta	[r3]=f31;		\
-	;;
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_FPU_HIGH				\
-	add	r2=CTX(F32),r32;		\
-	add	r3=CTX(F33),r32;		\
-	;;					\
-	stf.spill.nta	[r2]=f32,32;		\
-	stf.spill.nta	[r3]=f33,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f34,32;		\
-	stf.spill.nta	[r3]=f35,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f36,32;		\
-	stf.spill.nta	[r3]=f37,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f38,32;		\
-	stf.spill.nta	[r3]=f39,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f40,32;		\
-	stf.spill.nta	[r3]=f41,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f42,32;		\
-	stf.spill.nta	[r3]=f43,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f44,32;		\
-	stf.spill.nta	[r3]=f45,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f46,32;		\
-	stf.spill.nta	[r3]=f47,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f48,32;		\
-	stf.spill.nta	[r3]=f49,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f50,32;		\
-	stf.spill.nta	[r3]=f51,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f52,32;		\
-	stf.spill.nta	[r3]=f53,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f54,32;		\
-	stf.spill.nta	[r3]=f55,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f56,32;		\
-	stf.spill.nta	[r3]=f57,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f58,32;		\
-	stf.spill.nta	[r3]=f59,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f60,32;		\
-	stf.spill.nta	[r3]=f61,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f62,32;		\
-	stf.spill.nta	[r3]=f63,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f64,32;		\
-	stf.spill.nta	[r3]=f65,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f66,32;		\
-	stf.spill.nta	[r3]=f67,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f68,32;		\
-	stf.spill.nta	[r3]=f69,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f70,32;		\
-	stf.spill.nta	[r3]=f71,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f72,32;		\
-	stf.spill.nta	[r3]=f73,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f74,32;		\
-	stf.spill.nta	[r3]=f75,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f76,32;		\
-	stf.spill.nta	[r3]=f77,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f78,32;		\
-	stf.spill.nta	[r3]=f79,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f80,32;		\
-	stf.spill.nta	[r3]=f81,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f82,32;		\
-	stf.spill.nta	[r3]=f83,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f84,32;		\
-	stf.spill.nta	[r3]=f85,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f86,32;		\
-	stf.spill.nta	[r3]=f87,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f88,32;		\
-	stf.spill.nta	[r3]=f89,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f90,32;		\
-	stf.spill.nta	[r3]=f91,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f92,32;		\
-	stf.spill.nta	[r3]=f93,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f94,32;		\
-	stf.spill.nta	[r3]=f95,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f96,32;		\
-	stf.spill.nta	[r3]=f97,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f98,32;		\
-	stf.spill.nta	[r3]=f99,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f100,32;		\
-	stf.spill.nta	[r3]=f101,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f102,32;		\
-	stf.spill.nta	[r3]=f103,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f104,32;		\
-	stf.spill.nta	[r3]=f105,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f106,32;		\
-	stf.spill.nta	[r3]=f107,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f108,32;		\
-	stf.spill.nta	[r3]=f109,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f110,32;		\
-	stf.spill.nta	[r3]=f111,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f112,32;		\
-	stf.spill.nta	[r3]=f113,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f114,32;		\
-	stf.spill.nta	[r3]=f115,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f116,32;		\
-	stf.spill.nta	[r3]=f117,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f118,32;		\
-	stf.spill.nta	[r3]=f119,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f120,32;		\
-	stf.spill.nta	[r3]=f121,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f122,32;		\
-	stf.spill.nta	[r3]=f123,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f124,32;		\
-	stf.spill.nta	[r3]=f125,32;		\
-	;;					\
-	stf.spill.nta	[r2]=f126;		\
-	stf.spill.nta	[r3]=f127;		\
-	;;
-
-     /*
-      *      r33:    point to context_t structure
-      */
-#define	RESTORE_FPU_LOW				\
-    add     r2 = CTX(F2), r33;			\
-    add     r3 = CTX(F3), r33;			\
-    ;;						\
-    ldf.fill.nta f2 = [r2], 32;			\
-    ldf.fill.nta f3 = [r3], 32;			\
-    ;;						\
-    ldf.fill.nta f4 = [r2], 32;			\
-    ldf.fill.nta f5 = [r3], 32;			\
-    ;;						\
-    ldf.fill.nta f6 = [r2], 32;			\
-    ldf.fill.nta f7 = [r3], 32;			\
-    ;;						\
-    ldf.fill.nta f8 = [r2], 32;			\
-    ldf.fill.nta f9 = [r3], 32;			\
-    ;;						\
-    ldf.fill.nta f10 = [r2], 32;		\
-    ldf.fill.nta f11 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f12 = [r2], 32;		\
-    ldf.fill.nta f13 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f14 = [r2], 32;		\
-    ldf.fill.nta f15 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f16 = [r2], 32;		\
-    ldf.fill.nta f17 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f18 = [r2], 32;		\
-    ldf.fill.nta f19 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f20 = [r2], 32;		\
-    ldf.fill.nta f21 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f22 = [r2], 32;		\
-    ldf.fill.nta f23 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f24 = [r2], 32;		\
-    ldf.fill.nta f25 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f26 = [r2], 32;		\
-    ldf.fill.nta f27 = [r3], 32;		\
-	;;					\
-    ldf.fill.nta f28 = [r2], 32;		\
-    ldf.fill.nta f29 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f30 = [r2], 32;		\
-    ldf.fill.nta f31 = [r3], 32;		\
-    ;;
-
-
-
-    /*
-     *      r33:    point to context_t structure
-     */
-#define	RESTORE_FPU_HIGH			\
-    add     r2 = CTX(F32), r33;			\
-    add     r3 = CTX(F33), r33;			\
-    ;;						\
-    ldf.fill.nta f32 = [r2], 32;		\
-    ldf.fill.nta f33 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f34 = [r2], 32;		\
-    ldf.fill.nta f35 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f36 = [r2], 32;		\
-    ldf.fill.nta f37 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f38 = [r2], 32;		\
-    ldf.fill.nta f39 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f40 = [r2], 32;		\
-    ldf.fill.nta f41 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f42 = [r2], 32;		\
-    ldf.fill.nta f43 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f44 = [r2], 32;		\
-    ldf.fill.nta f45 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f46 = [r2], 32;		\
-    ldf.fill.nta f47 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f48 = [r2], 32;		\
-    ldf.fill.nta f49 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f50 = [r2], 32;		\
-    ldf.fill.nta f51 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f52 = [r2], 32;		\
-    ldf.fill.nta f53 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f54 = [r2], 32;		\
-    ldf.fill.nta f55 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f56 = [r2], 32;		\
-    ldf.fill.nta f57 = [r3], 32;   		\
-    ;;						\
-    ldf.fill.nta f58 = [r2], 32;		\
-    ldf.fill.nta f59 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f60 = [r2], 32;		\
-    ldf.fill.nta f61 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f62 = [r2], 32;		\
-    ldf.fill.nta f63 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f64 = [r2], 32;		\
-    ldf.fill.nta f65 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f66 = [r2], 32;		\
-    ldf.fill.nta f67 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f68 = [r2], 32;		\
-    ldf.fill.nta f69 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f70 = [r2], 32;		\
-    ldf.fill.nta f71 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f72 = [r2], 32;		\
-    ldf.fill.nta f73 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f74 = [r2], 32;		\
-    ldf.fill.nta f75 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f76 = [r2], 32;		\
-    ldf.fill.nta f77 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f78 = [r2], 32;		\
-    ldf.fill.nta f79 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f80 = [r2], 32;		\
-    ldf.fill.nta f81 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f82 = [r2], 32;		\
-    ldf.fill.nta f83 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f84 = [r2], 32;		\
-    ldf.fill.nta f85 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f86 = [r2], 32;		\
-    ldf.fill.nta f87 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f88 = [r2], 32;		\
-    ldf.fill.nta f89 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f90 = [r2], 32;		\
-    ldf.fill.nta f91 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f92 = [r2], 32;		\
-    ldf.fill.nta f93 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f94 = [r2], 32;		\
-    ldf.fill.nta f95 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f96 = [r2], 32;		\
-    ldf.fill.nta f97 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f98 = [r2], 32;		\
-    ldf.fill.nta f99 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f100 = [r2], 32;		\
-    ldf.fill.nta f101 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f102 = [r2], 32;		\
-    ldf.fill.nta f103 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f104 = [r2], 32;		\
-    ldf.fill.nta f105 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f106 = [r2], 32;		\
-    ldf.fill.nta f107 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f108 = [r2], 32;		\
-    ldf.fill.nta f109 = [r3], 32;   		\
-    ;;						\
-    ldf.fill.nta f110 = [r2], 32;		\
-    ldf.fill.nta f111 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f112 = [r2], 32;		\
-    ldf.fill.nta f113 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f114 = [r2], 32;		\
-    ldf.fill.nta f115 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f116 = [r2], 32;		\
-    ldf.fill.nta f117 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f118 = [r2], 32;		\
-    ldf.fill.nta f119 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f120 = [r2], 32;		\
-    ldf.fill.nta f121 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f122 = [r2], 32;		\
-    ldf.fill.nta f123 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f124 = [r2], 32;		\
-    ldf.fill.nta f125 = [r3], 32;		\
-    ;;						\
-    ldf.fill.nta f126 = [r2], 32;		\
-    ldf.fill.nta f127 = [r3], 32;		\
-    ;;
-
-	/*
-	 *	r32:		context_t base address
-	 */
-#define	SAVE_PTK_REGS				\
-    add r2=CTX(PKR0), r32;			\
-    mov r16=7;    				\
-    ;;                         			\
-    mov ar.lc=r16;  				\
-    mov r17=r0;					\
-    ;;						\
-1:						\
-    mov r18=pkr[r17];				\
-    ;;                     			\
-    srlz.i;					\
-    ;; 						\
-    st8 [r2]=r18, 8;				\
-    ;;    					\
-    add r17 =1,r17;				\
-    ;;                     			\
-    br.cloop.sptk 1b;				\
-    ;;
-
-/*
- *      r33:    point to context_t structure
- *      ar.lc are corrupted.
- */
-#define RESTORE_PTK_REGS	    		\
-    add r2=CTX(PKR0), r33;			\
-    mov r16=7;    				\
-    ;;                         			\
-    mov ar.lc=r16;  				\
-    mov r17=r0;					\
-    ;;						\
-1: 						\
-    ld8 r18=[r2], 8;				\
-    ;;						\
-    mov pkr[r17]=r18;				\
-    ;;    					\
-    srlz.i;					\
-    ;; 						\
-    add r17 =1,r17;				\
-    ;;                     			\
-    br.cloop.sptk 1b;				\
-    ;;
-
-
-/*
- * void vmm_trampoline( context_t * from,
- *			context_t * to)
- *
- * 	from:	r32
- *	to:	r33
- *  note: interrupt disabled before call this function.
- */
-GLOBAL_ENTRY(vmm_trampoline)
-    mov r16 = psr
-    adds r2 = CTX(PSR), r32
-    ;;
-    st8 [r2] = r16, 8       // psr
-    mov r17 = pr
-    ;;
-    st8 [r2] = r17, 8       // pr
-    mov r18 = ar.unat
-    ;;
-    st8 [r2] = r18
-    mov r17 = ar.rsc
-    ;;
-    adds r2 = CTX(RSC),r32
-    ;;
-    st8 [r2]= r17
-    mov ar.rsc =0
-    flushrs
-    ;;
-    SAVE_GENERAL_REGS
-    ;;
-    SAVE_KERNEL_REGS
-    ;;
-    SAVE_APP_REGS
-    ;;
-    SAVE_BRANCH_REGS
-    ;;
-    SAVE_CTL_REGS
-    ;;
-    SAVE_REGION_REGS
-    ;;
-    //SAVE_DEBUG_REGS
-    ;;
-    rsm  psr.dfl
-    ;;
-    srlz.d
-    ;;
-    SAVE_FPU_LOW
-    ;;
-    rsm  psr.dfh
-    ;;
-    srlz.d
-    ;;
-    SAVE_FPU_HIGH
-    ;;
-    SAVE_PTK_REGS
-    ;;
-    RESTORE_PTK_REGS
-    ;;
-    RESTORE_FPU_HIGH
-    ;;
-    RESTORE_FPU_LOW
-    ;;
-    //RESTORE_DEBUG_REGS
-    ;;
-    RESTORE_REGION_REGS
-    ;;
-    RESTORE_CTL_REGS
-    ;;
-    RESTORE_BRANCH_REGS
-    ;;
-    RESTORE_APP_REGS
-    ;;
-    RESTORE_KERNEL_REGS
-    ;;
-    RESTORE_GENERAL_REGS
-    ;;
-    adds r2=CTX(PSR), r33
-    ;;
-    ld8 r16=[r2], 8       // psr
-    ;;
-    mov psr.l=r16
-    ;;
-    srlz.d
-    ;;
-    ld8 r16=[r2], 8       // pr
-    ;;
-    mov pr =r16,-1
-    ld8 r16=[r2]       // unat
-    ;;
-    mov ar.unat=r16
-    ;;
-    adds r2=CTX(RSC),r33
-    ;;
-    ld8 r16 =[r2]
-    ;;
-    mov ar.rsc = r16
-    ;;
-    br.ret.sptk.few b0
-END(vmm_trampoline)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
deleted file mode 100644
index 958815c..0000000
--- a/arch/ia64/kvm/vcpu.c
+++ /dev/null
@@ -1,2209 +0,0 @@
-/*
- * kvm_vcpu.c: handling all virtual cpu related thing.
- * Copyright (c) 2005, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- *  Shaofan Li (Susue Li) <susie.li@intel.com>
- *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- *  Xiantao Zhang <xiantao.zhang@intel.com>
- */
-
-#include <linux/kvm_host.h>
-#include <linux/types.h>
-
-#include <asm/processor.h>
-#include <asm/ia64regs.h>
-#include <asm/gcc_intrin.h>
-#include <asm/kregs.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
-
-#include "asm-offsets.h"
-#include "vcpu.h"
-
-/*
- * Special notes:
- * - Index by it/dt/rt sequence
- * - Only existing mode transitions are allowed in this table
- * - RSE is placed at lazy mode when emulating guest partial mode
- * - If gva happens to be rr0 and rr4, only allowed case is identity
- *   mapping (gva=gpa), or panic! (How?)
- */
-int mm_switch_table[8][8] = {
-	/*  2004/09/12(Kevin): Allow switch to self */
-	/*
-	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
-	 *  This kind of transition usually occurs in the very early
-	 *  stage of Linux boot up procedure. Another case is in efi
-	 *  and pal calls. (see "arch/ia64/kernel/head.S")
-	 *
-	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
-	 *  This kind of transition is found when OSYa exits efi boot
-	 *  service. Due to gva = gpa in this case (Same region),
-	 *  data access can be satisfied though itlb entry for physical
-	 *  emulation is hit.
-	 */
-	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
-	{0,  0,  0,  0,  0,  0,  0,  0},
-	{0,  0,  0,  0,  0,  0,  0,  0},
-	/*
-	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
-	 *  This kind of transition is found in OSYa.
-	 *
-	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
-	 *  This kind of transition is found in OSYa
-	 */
-	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
-	/* (1,0,0)->(1,1,1) */
-	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
-	/*
-	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
-	 *  This kind of transition usually occurs when Linux returns
-	 *  from the low level TLB miss handlers.
-	 *  (see "arch/ia64/kernel/ivt.S")
-	 */
-	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
-	{0,  0,  0,  0,  0,  0,  0,  0},
-	/*
-	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
-	 *  This kind of transition usually occurs in Linux low level
-	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
-	 *
-	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
-	 *  This kind of transition usually occurs in pal and efi calls,
-	 *  which requires running in physical mode.
-	 *  (see "arch/ia64/kernel/head.S")
-	 *  (1,1,1)->(1,0,0)
-	 */
-
-	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
-};
-
-void physical_mode_init(struct kvm_vcpu  *vcpu)
-{
-	vcpu->arch.mode_flags = GUEST_IN_PHY;
-}
-
-void switch_to_physical_rid(struct kvm_vcpu *vcpu)
-{
-	unsigned long psr;
-
-	/* Save original virtual mode rr[0] and rr[4] */
-	psr = ia64_clear_ic();
-	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
-	ia64_srlz_d();
-	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
-	ia64_srlz_d();
-
-	ia64_set_psr(psr);
-	return;
-}
-
-void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
-{
-	unsigned long psr;
-
-	psr = ia64_clear_ic();
-	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
-	ia64_srlz_d();
-	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
-	ia64_srlz_d();
-	ia64_set_psr(psr);
-	return;
-}
-
-static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
-{
-	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
-}
-
-void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
-					struct ia64_psr new_psr)
-{
-	int act;
-	act = mm_switch_action(old_psr, new_psr);
-	switch (act) {
-	case SW_V2P:
-		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
-		old_psr.val, new_psr.val);*/
-		switch_to_physical_rid(vcpu);
-		/*
-		 * Set rse to enforced lazy, to prevent active rse
-		 *save/restor when guest physical mode.
-		 */
-		vcpu->arch.mode_flags |= GUEST_IN_PHY;
-		break;
-	case SW_P2V:
-		switch_to_virtual_rid(vcpu);
-		/*
-		 * recover old mode which is saved when entering
-		 * guest physical mode
-		 */
-		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
-		break;
-	case SW_SELF:
-		break;
-	case SW_NOP:
-		break;
-	default:
-		/* Sanity check */
-		break;
-	}
-	return;
-}
-
-/*
- * In physical mode, insert tc/tr for region 0 and 4 uses
- * RID[0] and RID[4] which is for physical mode emulation.
- * However what those inserted tc/tr wants is rid for
- * virtual mode. So original virtual rid needs to be restored
- * before insert.
- *
- * Operations which required such switch include:
- *  - insertions (itc.*, itr.*)
- *  - purges (ptc.* and ptr.*)
- *  - tpa
- *  - tak
- *  - thash?, ttag?
- * All above needs actual virtual rid for destination entry.
- */
-
-void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
-					struct ia64_psr new_psr)
-{
-
-	if ((old_psr.dt != new_psr.dt)
-			|| (old_psr.it != new_psr.it)
-			|| (old_psr.rt != new_psr.rt))
-		switch_mm_mode(vcpu, old_psr, new_psr);
-
-	return;
-}
-
-
-/*
- * In physical mode, insert tc/tr for region 0 and 4 uses
- * RID[0] and RID[4] which is for physical mode emulation.
- * However what those inserted tc/tr wants is rid for
- * virtual mode. So original virtual rid needs to be restored
- * before insert.
- *
- * Operations which required such switch include:
- *  - insertions (itc.*, itr.*)
- *  - purges (ptc.* and ptr.*)
- *  - tpa
- *  - tak
- *  - thash?, ttag?
- * All above needs actual virtual rid for destination entry.
- */
-
-void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
-{
-	if (is_physical_mode(vcpu)) {
-		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
-		switch_to_virtual_rid(vcpu);
-	}
-	return;
-}
-
-/* Recover always follows prepare */
-void recover_if_physical_mode(struct kvm_vcpu *vcpu)
-{
-	if (is_physical_mode(vcpu))
-		switch_to_physical_rid(vcpu);
-	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
-	return;
-}
-
-#define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)
-
-static u16 gr_info[32] = {
-	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
-	RPT(r1), RPT(r2), RPT(r3),
-	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
-	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
-	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
-	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
-	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
-	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
-	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
-};
-
-#define IA64_FIRST_STACKED_GR   32
-#define IA64_FIRST_ROTATING_FR  32
-
-static inline unsigned long
-rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
-{
-	reg += rrb;
-	if (reg >= sor)
-		reg -= sor;
-	return reg;
-}
-
-/*
- * Return the (rotated) index for floating point register
- * be in the REGNUM (REGNUM must range from 32-127,
- * result is in the range from 0-95.
- */
-static inline unsigned long fph_index(struct kvm_pt_regs *regs,
-						long regnum)
-{
-	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
-	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
-}
-
-/*
- * The inverse of the above: given bspstore and the number of
- * registers, calculate ar.bsp.
- */
-static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
-							long num_regs)
-{
-	long delta = ia64_rse_slot_num(addr) + num_regs;
-	int i = 0;
-
-	if (num_regs < 0)
-		delta -= 0x3e;
-	if (delta < 0) {
-		while (delta <= -0x3f) {
-			i--;
-			delta += 0x3f;
-		}
-	} else {
-		while (delta >= 0x3f) {
-			i++;
-			delta -= 0x3f;
-		}
-	}
-
-	return addr + num_regs + i;
-}
-
-static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
-					unsigned long *val, int *nat)
-{
-	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
-	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
-	unsigned long nat_mask;
-	unsigned long old_rsc, new_rsc;
-	long sof = (regs->cr_ifs) & 0x7f;
-	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
-	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-	long ridx = r1 - 32;
-
-	if (ridx < sor)
-		ridx = rotate_reg(sor, rrb_gr, ridx);
-
-	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
-	new_rsc = old_rsc&(~(0x3));
-	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
-
-	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
-	bsp = kbs + (regs->loadrs >> 19);
-
-	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
-	nat_mask = 1UL << ia64_rse_slot_num(addr);
-	rnat_addr = ia64_rse_rnat_addr(addr);
-
-	if (addr >= bspstore) {
-		ia64_flushrs();
-		ia64_mf();
-		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
-	}
-	*val = *addr;
-	if (nat) {
-		if (bspstore < rnat_addr)
-			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
-							& nat_mask);
-		else
-			*nat = (int)!!((*rnat_addr) & nat_mask);
-		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
-	}
-}
-
-void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
-				unsigned long val, unsigned long nat)
-{
-	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
-	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
-	unsigned long nat_mask;
-	unsigned long old_rsc, new_rsc, psr;
-	unsigned long rnat;
-	long sof = (regs->cr_ifs) & 0x7f;
-	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
-	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
-	long ridx = r1 - 32;
-
-	if (ridx < sor)
-		ridx = rotate_reg(sor, rrb_gr, ridx);
-
-	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
-	/* put RSC to lazy mode, and set loadrs 0 */
-	new_rsc = old_rsc & (~0x3fff0003);
-	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
-	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
-
-	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
-	nat_mask = 1UL << ia64_rse_slot_num(addr);
-	rnat_addr = ia64_rse_rnat_addr(addr);
-
-	local_irq_save(psr);
-	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
-	if (addr >= bspstore) {
-
-		ia64_flushrs();
-		ia64_mf();
-		*addr = val;
-		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
-		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
-		if (bspstore < rnat_addr)
-			rnat = rnat & (~nat_mask);
-		else
-			*rnat_addr = (*rnat_addr)&(~nat_mask);
-
-		ia64_mf();
-		ia64_loadrs();
-		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
-	} else {
-		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
-		*addr = val;
-		if (bspstore < rnat_addr)
-			rnat = rnat&(~nat_mask);
-		else
-			*rnat_addr = (*rnat_addr) & (~nat_mask);
-
-		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
-		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
-	}
-	local_irq_restore(psr);
-	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
-}
-
-void getreg(unsigned long regnum, unsigned long *val,
-				int *nat, struct kvm_pt_regs *regs)
-{
-	unsigned long addr, *unat;
-	if (regnum >= IA64_FIRST_STACKED_GR) {
-		get_rse_reg(regs, regnum, val, nat);
-		return;
-	}
-
-	/*
-	 * Now look at registers in [0-31] range and init correct UNAT
-	 */
-	addr = (unsigned long)regs;
-	unat = &regs->eml_unat;
-
-	addr += gr_info[regnum];
-
-	*val  = *(unsigned long *)addr;
-	/*
-	 * do it only when requested
-	 */
-	if (nat)
-		*nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
-}
-
-void setreg(unsigned long regnum, unsigned long val,
-			int nat, struct kvm_pt_regs *regs)
-{
-	unsigned long addr;
-	unsigned long bitmask;
-	unsigned long *unat;
-
-	/*
-	 * First takes care of stacked registers
-	 */
-	if (regnum >= IA64_FIRST_STACKED_GR) {
-		set_rse_reg(regs, regnum, val, nat);
-		return;
-	}
-
-	/*
-	 * Now look at registers in [0-31] range and init correct UNAT
-	 */
-	addr = (unsigned long)regs;
-	unat = &regs->eml_unat;
-	/*
-	 * add offset from base of struct
-	 * and do it !
-	 */
-	addr += gr_info[regnum];
-
-	*(unsigned long *)addr = val;
-
-	/*
-	 * We need to clear the corresponding UNAT bit to fully emulate the load
-	 * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4
-	 */
-	bitmask   = 1UL << ((addr >> 3) & 0x3f);
-	if (nat)
-		*unat |= bitmask;
-	 else
-		*unat &= ~bitmask;
-
-}
-
-u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	unsigned long val;
-
-	if (!reg)
-		return 0;
-	getreg(reg, &val, 0, regs);
-	return val;
-}
-
-void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
-{
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	long sof = (regs->cr_ifs) & 0x7f;
-
-	if (!reg)
-		return;
-	if (reg >= sof + 32)
-		return;
-	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
-}
-
-void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
-				struct kvm_pt_regs *regs)
-{
-	/* Take floating register rotation into consideration*/
-	if (regnum >= IA64_FIRST_ROTATING_FR)
-		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-#define CASE_FIXED_FP(reg)			\
-	case  (reg) :				\
-		ia64_stf_spill(fpval, reg);	\
-	break
-
-	switch (regnum) {
-		CASE_FIXED_FP(0);
-		CASE_FIXED_FP(1);
-		CASE_FIXED_FP(2);
-		CASE_FIXED_FP(3);
-		CASE_FIXED_FP(4);
-		CASE_FIXED_FP(5);
-
-		CASE_FIXED_FP(6);
-		CASE_FIXED_FP(7);
-		CASE_FIXED_FP(8);
-		CASE_FIXED_FP(9);
-		CASE_FIXED_FP(10);
-		CASE_FIXED_FP(11);
-
-		CASE_FIXED_FP(12);
-		CASE_FIXED_FP(13);
-		CASE_FIXED_FP(14);
-		CASE_FIXED_FP(15);
-		CASE_FIXED_FP(16);
-		CASE_FIXED_FP(17);
-		CASE_FIXED_FP(18);
-		CASE_FIXED_FP(19);
-		CASE_FIXED_FP(20);
-		CASE_FIXED_FP(21);
-		CASE_FIXED_FP(22);
-		CASE_FIXED_FP(23);
-		CASE_FIXED_FP(24);
-		CASE_FIXED_FP(25);
-		CASE_FIXED_FP(26);
-		CASE_FIXED_FP(27);
-		CASE_FIXED_FP(28);
-		CASE_FIXED_FP(29);
-		CASE_FIXED_FP(30);
-		CASE_FIXED_FP(31);
-		CASE_FIXED_FP(32);
-		CASE_FIXED_FP(33);
-		CASE_FIXED_FP(34);
-		CASE_FIXED_FP(35);
-		CASE_FIXED_FP(36);
-		CASE_FIXED_FP(37);
-		CASE_FIXED_FP(38);
-		CASE_FIXED_FP(39);
-		CASE_FIXED_FP(40);
-		CASE_FIXED_FP(41);
-		CASE_FIXED_FP(42);
-		CASE_FIXED_FP(43);
-		CASE_FIXED_FP(44);
-		CASE_FIXED_FP(45);
-		CASE_FIXED_FP(46);
-		CASE_FIXED_FP(47);
-		CASE_FIXED_FP(48);
-		CASE_FIXED_FP(49);
-		CASE_FIXED_FP(50);
-		CASE_FIXED_FP(51);
-		CASE_FIXED_FP(52);
-		CASE_FIXED_FP(53);
-		CASE_FIXED_FP(54);
-		CASE_FIXED_FP(55);
-		CASE_FIXED_FP(56);
-		CASE_FIXED_FP(57);
-		CASE_FIXED_FP(58);
-		CASE_FIXED_FP(59);
-		CASE_FIXED_FP(60);
-		CASE_FIXED_FP(61);
-		CASE_FIXED_FP(62);
-		CASE_FIXED_FP(63);
-		CASE_FIXED_FP(64);
-		CASE_FIXED_FP(65);
-		CASE_FIXED_FP(66);
-		CASE_FIXED_FP(67);
-		CASE_FIXED_FP(68);
-		CASE_FIXED_FP(69);
-		CASE_FIXED_FP(70);
-		CASE_FIXED_FP(71);
-		CASE_FIXED_FP(72);
-		CASE_FIXED_FP(73);
-		CASE_FIXED_FP(74);
-		CASE_FIXED_FP(75);
-		CASE_FIXED_FP(76);
-		CASE_FIXED_FP(77);
-		CASE_FIXED_FP(78);
-		CASE_FIXED_FP(79);
-		CASE_FIXED_FP(80);
-		CASE_FIXED_FP(81);
-		CASE_FIXED_FP(82);
-		CASE_FIXED_FP(83);
-		CASE_FIXED_FP(84);
-		CASE_FIXED_FP(85);
-		CASE_FIXED_FP(86);
-		CASE_FIXED_FP(87);
-		CASE_FIXED_FP(88);
-		CASE_FIXED_FP(89);
-		CASE_FIXED_FP(90);
-		CASE_FIXED_FP(91);
-		CASE_FIXED_FP(92);
-		CASE_FIXED_FP(93);
-		CASE_FIXED_FP(94);
-		CASE_FIXED_FP(95);
-		CASE_FIXED_FP(96);
-		CASE_FIXED_FP(97);
-		CASE_FIXED_FP(98);
-		CASE_FIXED_FP(99);
-		CASE_FIXED_FP(100);
-		CASE_FIXED_FP(101);
-		CASE_FIXED_FP(102);
-		CASE_FIXED_FP(103);
-		CASE_FIXED_FP(104);
-		CASE_FIXED_FP(105);
-		CASE_FIXED_FP(106);
-		CASE_FIXED_FP(107);
-		CASE_FIXED_FP(108);
-		CASE_FIXED_FP(109);
-		CASE_FIXED_FP(110);
-		CASE_FIXED_FP(111);
-		CASE_FIXED_FP(112);
-		CASE_FIXED_FP(113);
-		CASE_FIXED_FP(114);
-		CASE_FIXED_FP(115);
-		CASE_FIXED_FP(116);
-		CASE_FIXED_FP(117);
-		CASE_FIXED_FP(118);
-		CASE_FIXED_FP(119);
-		CASE_FIXED_FP(120);
-		CASE_FIXED_FP(121);
-		CASE_FIXED_FP(122);
-		CASE_FIXED_FP(123);
-		CASE_FIXED_FP(124);
-		CASE_FIXED_FP(125);
-		CASE_FIXED_FP(126);
-		CASE_FIXED_FP(127);
-	}
-#undef CASE_FIXED_FP
-}
-
-void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
-					struct kvm_pt_regs *regs)
-{
-	/* Take floating register rotation into consideration*/
-	if (regnum >= IA64_FIRST_ROTATING_FR)
-		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
-
-#define CASE_FIXED_FP(reg)			\
-	case (reg) :				\
-		ia64_ldf_fill(reg, fpval);	\
-	break
-
-	switch (regnum) {
-		CASE_FIXED_FP(2);
-		CASE_FIXED_FP(3);
-		CASE_FIXED_FP(4);
-		CASE_FIXED_FP(5);
-
-		CASE_FIXED_FP(6);
-		CASE_FIXED_FP(7);
-		CASE_FIXED_FP(8);
-		CASE_FIXED_FP(9);
-		CASE_FIXED_FP(10);
-		CASE_FIXED_FP(11);
-
-		CASE_FIXED_FP(12);
-		CASE_FIXED_FP(13);
-		CASE_FIXED_FP(14);
-		CASE_FIXED_FP(15);
-		CASE_FIXED_FP(16);
-		CASE_FIXED_FP(17);
-		CASE_FIXED_FP(18);
-		CASE_FIXED_FP(19);
-		CASE_FIXED_FP(20);
-		CASE_FIXED_FP(21);
-		CASE_FIXED_FP(22);
-		CASE_FIXED_FP(23);
-		CASE_FIXED_FP(24);
-		CASE_FIXED_FP(25);
-		CASE_FIXED_FP(26);
-		CASE_FIXED_FP(27);
-		CASE_FIXED_FP(28);
-		CASE_FIXED_FP(29);
-		CASE_FIXED_FP(30);
-		CASE_FIXED_FP(31);
-		CASE_FIXED_FP(32);
-		CASE_FIXED_FP(33);
-		CASE_FIXED_FP(34);
-		CASE_FIXED_FP(35);
-		CASE_FIXED_FP(36);
-		CASE_FIXED_FP(37);
-		CASE_FIXED_FP(38);
-		CASE_FIXED_FP(39);
-		CASE_FIXED_FP(40);
-		CASE_FIXED_FP(41);
-		CASE_FIXED_FP(42);
-		CASE_FIXED_FP(43);
-		CASE_FIXED_FP(44);
-		CASE_FIXED_FP(45);
-		CASE_FIXED_FP(46);
-		CASE_FIXED_FP(47);
-		CASE_FIXED_FP(48);
-		CASE_FIXED_FP(49);
-		CASE_FIXED_FP(50);
-		CASE_FIXED_FP(51);
-		CASE_FIXED_FP(52);
-		CASE_FIXED_FP(53);
-		CASE_FIXED_FP(54);
-		CASE_FIXED_FP(55);
-		CASE_FIXED_FP(56);
-		CASE_FIXED_FP(57);
-		CASE_FIXED_FP(58);
-		CASE_FIXED_FP(59);
-		CASE_FIXED_FP(60);
-		CASE_FIXED_FP(61);
-		CASE_FIXED_FP(62);
-		CASE_FIXED_FP(63);
-		CASE_FIXED_FP(64);
-		CASE_FIXED_FP(65);
-		CASE_FIXED_FP(66);
-		CASE_FIXED_FP(67);
-		CASE_FIXED_FP(68);
-		CASE_FIXED_FP(69);
-		CASE_FIXED_FP(70);
-		CASE_FIXED_FP(71);
-		CASE_FIXED_FP(72);
-		CASE_FIXED_FP(73);
-		CASE_FIXED_FP(74);
-		CASE_FIXED_FP(75);
-		CASE_FIXED_FP(76);
-		CASE_FIXED_FP(77);
-		CASE_FIXED_FP(78);
-		CASE_FIXED_FP(79);
-		CASE_FIXED_FP(80);
-		CASE_FIXED_FP(81);
-		CASE_FIXED_FP(82);
-		CASE_FIXED_FP(83);
-		CASE_FIXED_FP(84);
-		CASE_FIXED_FP(85);
-		CASE_FIXED_FP(86);
-		CASE_FIXED_FP(87);
-		CASE_FIXED_FP(88);
-		CASE_FIXED_FP(89);
-		CASE_FIXED_FP(90);
-		CASE_FIXED_FP(91);
-		CASE_FIXED_FP(92);
-		CASE_FIXED_FP(93);
-		CASE_FIXED_FP(94);
-		CASE_FIXED_FP(95);
-		CASE_FIXED_FP(96);
-		CASE_FIXED_FP(97);
-		CASE_FIXED_FP(98);
-		CASE_FIXED_FP(99);
-		CASE_FIXED_FP(100);
-		CASE_FIXED_FP(101);
-		CASE_FIXED_FP(102);
-		CASE_FIXED_FP(103);
-		CASE_FIXED_FP(104);
-		CASE_FIXED_FP(105);
-		CASE_FIXED_FP(106);
-		CASE_FIXED_FP(107);
-		CASE_FIXED_FP(108);
-		CASE_FIXED_FP(109);
-		CASE_FIXED_FP(110);
-		CASE_FIXED_FP(111);
-		CASE_FIXED_FP(112);
-		CASE_FIXED_FP(113);
-		CASE_FIXED_FP(114);
-		CASE_FIXED_FP(115);
-		CASE_FIXED_FP(116);
-		CASE_FIXED_FP(117);
-		CASE_FIXED_FP(118);
-		CASE_FIXED_FP(119);
-		CASE_FIXED_FP(120);
-		CASE_FIXED_FP(121);
-		CASE_FIXED_FP(122);
-		CASE_FIXED_FP(123);
-		CASE_FIXED_FP(124);
-		CASE_FIXED_FP(125);
-		CASE_FIXED_FP(126);
-		CASE_FIXED_FP(127);
-	}
-}
-
-void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
-						struct ia64_fpreg *val)
-{
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	getfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
-}
-
-void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
-						struct ia64_fpreg *val)
-{
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	if (reg > 1)
-		setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
-}
-
-/*
- * The Altix RTC is mapped specially here for the vmm module
- */
-#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
-static long kvm_get_itc(struct kvm_vcpu *vcpu)
-{
-#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
-	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
-
-	if (kvm->arch.is_sn2)
-		return (*SN_RTC_BASE);
-	else
-#endif
-		return ia64_getreg(_IA64_REG_AR_ITC);
-}
-
-/************************************************************************
- * lsapic timer
- ***********************************************************************/
-u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
-{
-	unsigned long guest_itc;
-	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
-
-	if (guest_itc >= VMX(vcpu, last_itc)) {
-		VMX(vcpu, last_itc) = guest_itc;
-		return  guest_itc;
-	} else
-		return VMX(vcpu, last_itc);
-}
-
-static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
-static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
-{
-	struct kvm_vcpu *v;
-	struct kvm *kvm;
-	int i;
-	long itc_offset = val - kvm_get_itc(vcpu);
-	unsigned long vitv = VCPU(vcpu, itv);
-
-	kvm = (struct kvm *)KVM_VM_BASE;
-
-	if (kvm_vcpu_is_bsp(vcpu)) {
-		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
-			v = (struct kvm_vcpu *)((char *)vcpu +
-					sizeof(struct kvm_vcpu_data) * i);
-			VMX(v, itc_offset) = itc_offset;
-			VMX(v, last_itc) = 0;
-		}
-	}
-	VMX(vcpu, last_itc) = 0;
-	if (VCPU(vcpu, itm) <= val) {
-		VMX(vcpu, itc_check) = 0;
-		vcpu_unpend_interrupt(vcpu, vitv);
-	} else {
-		VMX(vcpu, itc_check) = 1;
-		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
-	}
-
-}
-
-static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
-{
-	return ((u64)VCPU(vcpu, itm));
-}
-
-static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
-{
-	unsigned long vitv = VCPU(vcpu, itv);
-	VCPU(vcpu, itm) = val;
-
-	if (val > vcpu_get_itc(vcpu)) {
-		VMX(vcpu, itc_check) = 1;
-		vcpu_unpend_interrupt(vcpu, vitv);
-		VMX(vcpu, timer_pending) = 0;
-	} else
-		VMX(vcpu, itc_check) = 0;
-}
-
-#define  ITV_VECTOR(itv)    (itv&0xff)
-#define  ITV_IRQ_MASK(itv)  (itv&(1<<16))
-
-static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
-{
-	VCPU(vcpu, itv) = val;
-	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
-		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
-		vcpu->arch.timer_pending = 0;
-	}
-}
-
-static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
-{
-	int vec;
-
-	vec = highest_inservice_irq(vcpu);
-	if (vec == NULL_VECTOR)
-		return;
-	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
-	VCPU(vcpu, eoi) = 0;
-	vcpu->arch.irq_new_pending = 1;
-
-}
-
-/* See Table 5-8 in SDM vol2 for the definition */
-int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
-{
-	union ia64_tpr vtpr;
-
-	vtpr.val = VCPU(vcpu, tpr);
-
-	if (h_inservice == NMI_VECTOR)
-		return IRQ_MASKED_BY_INSVC;
-
-	if (h_pending == NMI_VECTOR) {
-		/* Non Maskable Interrupt */
-		return IRQ_NO_MASKED;
-	}
-
-	if (h_inservice == ExtINT_VECTOR)
-		return IRQ_MASKED_BY_INSVC;
-
-	if (h_pending == ExtINT_VECTOR) {
-		if (vtpr.mmi) {
-			/* mask all external IRQ */
-			return IRQ_MASKED_BY_VTPR;
-		} else
-			return IRQ_NO_MASKED;
-	}
-
-	if (is_higher_irq(h_pending, h_inservice)) {
-		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
-			return IRQ_NO_MASKED;
-		else
-			return IRQ_MASKED_BY_VTPR;
-	} else {
-		return IRQ_MASKED_BY_INSVC;
-	}
-}
-
-void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
-	long spsr;
-	int ret;
-
-	local_irq_save(spsr);
-	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
-	local_irq_restore(spsr);
-
-	vcpu->arch.irq_new_pending = 1;
-}
-
-void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
-{
-	long spsr;
-	int ret;
-
-	local_irq_save(spsr);
-	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
-	local_irq_restore(spsr);
-	if (ret) {
-		vcpu->arch.irq_new_pending = 1;
-		wmb();
-	}
-}
-
-void update_vhpi(struct kvm_vcpu *vcpu, int vec)
-{
-	u64 vhpi;
-
-	if (vec == NULL_VECTOR)
-		vhpi = 0;
-	else if (vec == NMI_VECTOR)
-		vhpi = 32;
-	else if (vec == ExtINT_VECTOR)
-		vhpi = 16;
-	else
-		vhpi = vec >> 4;
-
-	VCPU(vcpu, vhpi) = vhpi;
-	if (VCPU(vcpu, vac).a_int)
-		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
-				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
-}
-
-u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
-{
-	int vec, h_inservice, mask;
-
-	vec = highest_pending_irq(vcpu);
-	h_inservice = highest_inservice_irq(vcpu);
-	mask = irq_masked(vcpu, vec, h_inservice);
-	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
-		if (VCPU(vcpu, vhpi))
-			update_vhpi(vcpu, NULL_VECTOR);
-		return IA64_SPURIOUS_INT_VECTOR;
-	}
-	if (mask == IRQ_MASKED_BY_VTPR) {
-		update_vhpi(vcpu, vec);
-		return IA64_SPURIOUS_INT_VECTOR;
-	}
-	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
-	vcpu_unpend_interrupt(vcpu, vec);
-	return  (u64)vec;
-}
-
-/**************************************************************************
-  Privileged operation emulation routines
- **************************************************************************/
-u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	union ia64_pta vpta;
-	union ia64_rr vrr;
-	u64 pval;
-	u64 vhpt_offset;
-
-	vpta.val = vcpu_get_pta(vcpu);
-	vrr.val = vcpu_get_rr(vcpu, vadr);
-	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
-	if (vpta.vf) {
-		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
-				vpta.val, 0, 0, 0, 0);
-	} else {
-		pval = (vadr & VRN_MASK) | vhpt_offset |
-			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
-	}
-	return  pval;
-}
-
-u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	union ia64_rr vrr;
-	union ia64_pta vpta;
-	u64 pval;
-
-	vpta.val = vcpu_get_pta(vcpu);
-	vrr.val = vcpu_get_rr(vcpu, vadr);
-	if (vpta.vf) {
-		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
-						0, 0, 0, 0, 0);
-	} else
-		pval = 1;
-
-	return  pval;
-}
-
-u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
-{
-	struct thash_data *data;
-	union ia64_pta vpta;
-	u64 key;
-
-	vpta.val = vcpu_get_pta(vcpu);
-	if (vpta.vf == 0) {
-		key = 1;
-		return key;
-	}
-	data = vtlb_lookup(vcpu, vadr, D_TLB);
-	if (!data || !data->p)
-		key = 1;
-	else
-		key = data->key;
-
-	return key;
-}
-
-void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long thash, vadr;
-
-	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
-	thash = vcpu_thash(vcpu, vadr);
-	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
-}
-
-void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long tag, vadr;
-
-	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
-	tag = vcpu_ttag(vcpu, vadr);
-	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
-}
-
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
-{
-	struct thash_data *data;
-	union ia64_isr visr, pt_isr;
-	struct kvm_pt_regs *regs;
-	struct ia64_psr vpsr;
-
-	regs = vcpu_regs(vcpu);
-	pt_isr.val = VMX(vcpu, cr_isr);
-	visr.val = 0;
-	visr.ei = pt_isr.ei;
-	visr.ir = pt_isr.ir;
-	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-	visr.na = 1;
-
-	data = vhpt_lookup(vadr);
-	if (data) {
-		if (data->p == 0) {
-			vcpu_set_isr(vcpu, visr.val);
-			data_page_not_present(vcpu, vadr);
-			return IA64_FAULT;
-		} else if (data->ma == VA_MATTR_NATPAGE) {
-			vcpu_set_isr(vcpu, visr.val);
-			dnat_page_consumption(vcpu, vadr);
-			return IA64_FAULT;
-		} else {
-			*padr = (data->gpaddr >> data->ps << data->ps) |
-				(vadr & (PSIZE(data->ps) - 1));
-			return IA64_NO_FAULT;
-		}
-	}
-
-	data = vtlb_lookup(vcpu, vadr, D_TLB);
-	if (data) {
-		if (data->p == 0) {
-			vcpu_set_isr(vcpu, visr.val);
-			data_page_not_present(vcpu, vadr);
-			return IA64_FAULT;
-		} else if (data->ma == VA_MATTR_NATPAGE) {
-			vcpu_set_isr(vcpu, visr.val);
-			dnat_page_consumption(vcpu, vadr);
-			return IA64_FAULT;
-		} else{
-			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
-				| (vadr & (PSIZE(data->ps) - 1));
-			return IA64_NO_FAULT;
-		}
-	}
-	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
-		if (vpsr.ic) {
-			vcpu_set_isr(vcpu, visr.val);
-			alt_dtlb(vcpu, vadr);
-			return IA64_FAULT;
-		} else {
-			nested_dtlb(vcpu);
-			return IA64_FAULT;
-		}
-	} else {
-		if (vpsr.ic) {
-			vcpu_set_isr(vcpu, visr.val);
-			dvhpt_fault(vcpu, vadr);
-			return IA64_FAULT;
-		} else{
-			nested_dtlb(vcpu);
-			return IA64_FAULT;
-		}
-	}
-
-	return IA64_NO_FAULT;
-}
-
-int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r1, r3;
-
-	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
-
-	if (vcpu_tpa(vcpu, r3, &r1))
-		return IA64_FAULT;
-
-	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
-	return(IA64_NO_FAULT);
-}
-
-void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r1, r3;
-
-	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
-	r1 = vcpu_tak(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
-}
-
-/************************************
- * Insert/Purge translation register/cache
- ************************************/
-void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
-	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
-}
-
-void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
-{
-	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
-}
-
-void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
-	u64 ps, va, rid;
-	struct thash_data *p_itr;
-
-	ps = itir_ps(itir);
-	va = PAGEALIGN(ifa, ps);
-	pte &= ~PAGE_FLAGS_RV_MASK;
-	rid = vcpu_get_rr(vcpu, ifa);
-	rid = rid & RR_RID_MASK;
-	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
-	vcpu_set_tr(p_itr, pte, itir, va, rid);
-	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
-}
-
-
-void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
-{
-	u64 gpfn;
-	u64 ps, va, rid;
-	struct thash_data *p_dtr;
-
-	ps = itir_ps(itir);
-	va = PAGEALIGN(ifa, ps);
-	pte &= ~PAGE_FLAGS_RV_MASK;
-
-	if (ps != _PAGE_SIZE_16M)
-		thash_purge_entries(vcpu, va, ps);
-	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
-	if (__gpfn_is_io(gpfn))
-		pte |= VTLB_PTE_IO;
-	rid = vcpu_get_rr(vcpu, va);
-	rid = rid & RR_RID_MASK;
-	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
-	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
-							pte, itir, va, rid);
-	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
-}
-
-void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
-	int index;
-	u64 va;
-
-	va = PAGEALIGN(ifa, ps);
-	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
-		vcpu->arch.dtrs[index].page_flags = 0;
-
-	thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
-{
-	int index;
-	u64 va;
-
-	va = PAGEALIGN(ifa, ps);
-	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
-		vcpu->arch.itrs[index].page_flags = 0;
-
-	thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
-	va = PAGEALIGN(va, ps);
-	thash_purge_entries(vcpu, va, ps);
-}
-
-void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
-{
-	thash_purge_all(vcpu);
-}
-
-void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-	long psr;
-	local_irq_save(psr);
-	p->exit_reason = EXIT_REASON_PTC_G;
-
-	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
-	p->u.ptc_g_data.vaddr = va;
-	p->u.ptc_g_data.ps = ps;
-	vmm_transition(vcpu);
-	/* Do Local Purge Here*/
-	vcpu_ptc_l(vcpu, va, ps);
-	local_irq_restore(psr);
-}
-
-
-void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
-{
-	vcpu_ptc_ga(vcpu, va, ps);
-}
-
-void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	vcpu_ptc_e(vcpu, ifa);
-}
-
-void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long ifa, itir;
-
-	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
-	itir = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
-}
-
-void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte, slot;
-
-	slot = vcpu_get_gr(vcpu, inst.M45.r3);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
-}
-
-
-
-void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte, slot;
-
-	slot = vcpu_get_gr(vcpu, inst.M45.r3);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
-}
-
-void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte;
-
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_itc_d(vcpu, pte, itir, ifa);
-}
-
-void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long itir, ifa, pte;
-
-	itir = vcpu_get_itir(vcpu);
-	ifa = vcpu_get_ifa(vcpu);
-	pte = vcpu_get_gr(vcpu, inst.M45.r2);
-	vcpu_itc_i(vcpu, pte, itir, ifa);
-}
-
-/*************************************
- * Moves to semi-privileged registers
- *************************************/
-
-void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long imm;
-
-	if (inst.M30.s)
-		imm = -inst.M30.imm;
-	else
-		imm = inst.M30.imm;
-
-	vcpu_set_itc(vcpu, imm);
-}
-
-void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r2;
-
-	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
-	vcpu_set_itc(vcpu, r2);
-}
-
-void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r1;
-
-	r1 = vcpu_get_itc(vcpu);
-	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
-}
-
-/**************************************************************************
-  struct kvm_vcpu protection key register access routines
- **************************************************************************/
-
-unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
-{
-	return ((unsigned long)ia64_get_pkr(reg));
-}
-
-void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
-{
-	ia64_set_pkr(reg, val);
-}
-
-/********************************
- * Moves to privileged registers
- ********************************/
-unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
-					unsigned long val)
-{
-	union ia64_rr oldrr, newrr;
-	unsigned long rrval;
-	struct exit_ctl_data *p = &vcpu->arch.exit_data;
-	unsigned long psr;
-
-	oldrr.val = vcpu_get_rr(vcpu, reg);
-	newrr.val = val;
-	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
-
-	switch ((unsigned long)(reg >> VRN_SHIFT)) {
-	case VRN6:
-		vcpu->arch.vmm_rr = vrrtomrr(val);
-		local_irq_save(psr);
-		p->exit_reason = EXIT_REASON_SWITCH_RR6;
-		vmm_transition(vcpu);
-		local_irq_restore(psr);
-		break;
-	case VRN4:
-		rrval = vrrtomrr(val);
-		vcpu->arch.metaphysical_saved_rr4 = rrval;
-		if (!is_physical_mode(vcpu))
-			ia64_set_rr(reg, rrval);
-		break;
-	case VRN0:
-		rrval = vrrtomrr(val);
-		vcpu->arch.metaphysical_saved_rr0 = rrval;
-		if (!is_physical_mode(vcpu))
-			ia64_set_rr(reg, rrval);
-		break;
-	default:
-		ia64_set_rr(reg, vrrtomrr(val));
-		break;
-	}
-
-	return (IA64_NO_FAULT);
-}
-
-void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_rr(vcpu, r3, r2);
-}
-
-void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-}
-
-void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_pmc(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_pmd(vcpu, r3, r2);
-}
-
-void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	u64 r3, r2;
-
-	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
-	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
-	vcpu_set_pkr(vcpu, r3, r2);
-}
-
-void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_rr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_pkr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_dbr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_ibr(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_pmc(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
-{
-	/* FIXME: This could get called as a result of a rsvd-reg fault */
-	if (reg > (ia64_get_cpuid(3) & 0xff))
-		return 0;
-	else
-		return ia64_get_cpuid(reg);
-}
-
-void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r3, r1;
-
-	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
-	r1 = vcpu_get_cpuid(vcpu, r3);
-	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
-}
-
-void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	VCPU(vcpu, tpr) = val;
-	vcpu->arch.irq_check = 1;
-}
-
-unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long r2;
-
-	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
-	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
-
-	switch (inst.M32.cr3) {
-	case 0:
-		vcpu_set_dcr(vcpu, r2);
-		break;
-	case 1:
-		vcpu_set_itm(vcpu, r2);
-		break;
-	case 66:
-		vcpu_set_tpr(vcpu, r2);
-		break;
-	case 67:
-		vcpu_set_eoi(vcpu, r2);
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long tgt = inst.M33.r1;
-	unsigned long val;
-
-	switch (inst.M33.cr3) {
-	case 65:
-		val = vcpu_get_ivr(vcpu);
-		vcpu_set_gr(vcpu, tgt, val, 0);
-		break;
-
-	case 67:
-		vcpu_set_gr(vcpu, tgt, 0L, 0);
-		break;
-	default:
-		val = VCPU(vcpu, vcr[inst.M33.cr3]);
-		vcpu_set_gr(vcpu, tgt, val, 0);
-		break;
-	}
-
-	return 0;
-}
-
-void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
-{
-
-	unsigned long mask;
-	struct kvm_pt_regs *regs;
-	struct ia64_psr old_psr, new_psr;
-
-	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
-	regs = vcpu_regs(vcpu);
-	/* We only support guest as:
-	 *  vpsr.pk = 0
-	 *  vpsr.is = 0
-	 * Otherwise panic
-	 */
-	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
-		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
-				"& vpsr.is=0\n");
-
-	/*
-	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
-	 * Since these bits will become 0, after success execution of each
-	 * instruction, we will change set them to mIA64_PSR
-	 */
-	VCPU(vcpu, vpsr) = val
-		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
-			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
-
-	if (!old_psr.i && (val & IA64_PSR_I)) {
-		/* vpsr.i 0->1 */
-		vcpu->arch.irq_check = 1;
-	}
-	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
-	/*
-	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
-	 * , except for the following bits:
-	 *  ic/i/dt/si/rt/mc/it/bn/vm
-	 */
-	mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
-		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
-		IA64_PSR_VM;
-
-	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
-
-	check_mm_mode_switch(vcpu, old_psr, new_psr);
-
-	return ;
-}
-
-unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
-{
-	struct ia64_psr vpsr;
-
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
-
-	if (!vpsr.ic)
-		VCPU(vcpu, ifs) = regs->cr_ifs;
-	regs->cr_ifs = IA64_IFS_V;
-	return (IA64_NO_FAULT);
-}
-
-
-
-/**************************************************************************
-  VCPU banked general register access routines
- **************************************************************************/
-#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
-	do {     							\
-		__asm__ __volatile__ (					\
-				";;extr.u %0 = %3,%6,16;;\n"		\
-				"dep %1 = %0, %1, 0, 16;;\n"		\
-				"st8 [%4] = %1\n"			\
-				"extr.u %0 = %2, 16, 16;;\n"		\
-				"dep %3 = %0, %3, %6, 16;;\n"		\
-				"st8 [%5] = %3\n"			\
-				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
-				"r"(*runat), "r"(b1unat), "r"(runat),	\
-				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
-	} while (0)
-
-void vcpu_bsw0(struct kvm_vcpu *vcpu)
-{
-	unsigned long i;
-
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	unsigned long *r = &regs->r16;
-	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
-	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
-	unsigned long *runat = &regs->eml_unat;
-	unsigned long *b0unat = &VCPU(vcpu, vbnat);
-	unsigned long *b1unat = &VCPU(vcpu, vnat);
-
-
-	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
-		for (i = 0; i < 16; i++) {
-			*b1++ = *r;
-			*r++ = *b0++;
-		}
-		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
-				VMM_PT_REGS_R16_SLOT);
-		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
-	}
-}
-
-#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
-	do {             						\
-		__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"	\
-				"dep %1 = %0, %1, 16, 16;;\n"		\
-				"st8 [%4] = %1\n"			\
-				"extr.u %0 = %2, 0, 16;;\n"		\
-				"dep %3 = %0, %3, %6, 16;;\n"		\
-				"st8 [%5] = %3\n"			\
-				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
-				"r"(*runat), "r"(b0unat), "r"(runat),	\
-				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
-	} while (0)
-
-void vcpu_bsw1(struct kvm_vcpu *vcpu)
-{
-	unsigned long i;
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	unsigned long *r = &regs->r16;
-	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
-	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
-	unsigned long *runat = &regs->eml_unat;
-	unsigned long *b0unat = &VCPU(vcpu, vbnat);
-	unsigned long *b1unat = &VCPU(vcpu, vnat);
-
-	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
-		for (i = 0; i < 16; i++) {
-			*b0++ = *r;
-			*r++ = *b1++;
-		}
-		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
-				VMM_PT_REGS_R16_SLOT);
-		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
-	}
-}
-
-void vcpu_rfi(struct kvm_vcpu *vcpu)
-{
-	unsigned long ifs, psr;
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	psr = VCPU(vcpu, ipsr);
-	if (psr & IA64_PSR_BN)
-		vcpu_bsw1(vcpu);
-	else
-		vcpu_bsw0(vcpu);
-	vcpu_set_psr(vcpu, psr);
-	ifs = VCPU(vcpu, ifs);
-	if (ifs >> 63)
-		regs->cr_ifs = ifs;
-	regs->cr_iip = VCPU(vcpu, iip);
-}
-
-/*
-   VPSR can't keep track of below bits of guest PSR
-   This function gets guest PSR
- */
-
-unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
-{
-	unsigned long mask;
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-
-	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
-		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
-	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
-}
-
-void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long vpsr;
-	unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
-					| inst.M44.imm;
-
-	vpsr = vcpu_get_psr(vcpu);
-	vpsr &= (~imm24);
-	vcpu_set_psr(vcpu, vpsr);
-}
-
-void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long vpsr;
-	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
-				| inst.M44.imm;
-
-	vpsr = vcpu_get_psr(vcpu);
-	vpsr |= imm24;
-	vcpu_set_psr(vcpu, vpsr);
-}
-
-/* Generate Mask
- * Parameter:
- *  bit -- starting bit
- *  len -- how many bits
- */
-#define MASK(bit,len)				   	\
-({							\
-		__u64	ret;				\
-							\
-		__asm __volatile("dep %0=-1, r0, %1, %2"\
-				: "=r" (ret):		\
-		  "M" (bit),				\
-		  "M" (len));				\
-		ret;					\
-})
-
-void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
-	vcpu_set_psr(vcpu, val);
-}
-
-void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long val;
-
-	val = vcpu_get_gr(vcpu, inst.M35.r2);
-	vcpu_set_psr_l(vcpu, val);
-}
-
-void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
-{
-	unsigned long val;
-
-	val = vcpu_get_psr(vcpu);
-	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
-	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
-}
-
-void vcpu_increment_iip(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-	if (ipsr->ri == 2) {
-		ipsr->ri = 0;
-		regs->cr_iip += 16;
-	} else
-		ipsr->ri++;
-}
-
-void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
-
-	if (ipsr->ri == 0) {
-		ipsr->ri = 2;
-		regs->cr_iip -= 16;
-	} else
-		ipsr->ri--;
-}
-
-/** Emulate a privileged operation.
- *
- *
- * @param vcpu virtual cpu
- * @cause the reason cause virtualization fault
- * @opcode the instruction code which cause virtualization fault
- */
-
-void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
-{
-	unsigned long status, cause, opcode ;
-	INST64 inst;
-
-	status = IA64_NO_FAULT;
-	cause = VMX(vcpu, cause);
-	opcode = VMX(vcpu, opcode);
-	inst.inst = opcode;
-	/*
-	 * Switch to actual virtual rid in rr0 and rr4,
-	 * which is required by some tlb related instructions.
-	 */
-	prepare_if_physical_mode(vcpu);
-
-	switch (cause) {
-	case EVENT_RSM:
-		kvm_rsm(vcpu, inst);
-		break;
-	case EVENT_SSM:
-		kvm_ssm(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_PSR:
-		kvm_mov_to_psr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_PSR:
-		kvm_mov_from_psr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_CR:
-		kvm_mov_from_cr(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_CR:
-		kvm_mov_to_cr(vcpu, inst);
-		break;
-	case EVENT_BSW_0:
-		vcpu_bsw0(vcpu);
-		break;
-	case EVENT_BSW_1:
-		vcpu_bsw1(vcpu);
-		break;
-	case EVENT_COVER:
-		vcpu_cover(vcpu);
-		break;
-	case EVENT_RFI:
-		vcpu_rfi(vcpu);
-		break;
-	case EVENT_ITR_D:
-		kvm_itr_d(vcpu, inst);
-		break;
-	case EVENT_ITR_I:
-		kvm_itr_i(vcpu, inst);
-		break;
-	case EVENT_PTR_D:
-		kvm_ptr_d(vcpu, inst);
-		break;
-	case EVENT_PTR_I:
-		kvm_ptr_i(vcpu, inst);
-		break;
-	case EVENT_ITC_D:
-		kvm_itc_d(vcpu, inst);
-		break;
-	case EVENT_ITC_I:
-		kvm_itc_i(vcpu, inst);
-		break;
-	case EVENT_PTC_L:
-		kvm_ptc_l(vcpu, inst);
-		break;
-	case EVENT_PTC_G:
-		kvm_ptc_g(vcpu, inst);
-		break;
-	case EVENT_PTC_GA:
-		kvm_ptc_ga(vcpu, inst);
-		break;
-	case EVENT_PTC_E:
-		kvm_ptc_e(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_RR:
-		kvm_mov_to_rr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_RR:
-		kvm_mov_from_rr(vcpu, inst);
-		break;
-	case EVENT_THASH:
-		kvm_thash(vcpu, inst);
-		break;
-	case EVENT_TTAG:
-		kvm_ttag(vcpu, inst);
-		break;
-	case EVENT_TPA:
-		status = kvm_tpa(vcpu, inst);
-		break;
-	case EVENT_TAK:
-		kvm_tak(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_AR_IMM:
-		kvm_mov_to_ar_imm(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_AR:
-		kvm_mov_to_ar_reg(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_AR:
-		kvm_mov_from_ar_reg(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_DBR:
-		kvm_mov_to_dbr(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_IBR:
-		kvm_mov_to_ibr(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_PMC:
-		kvm_mov_to_pmc(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_PMD:
-		kvm_mov_to_pmd(vcpu, inst);
-		break;
-	case EVENT_MOV_TO_PKR:
-		kvm_mov_to_pkr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_DBR:
-		kvm_mov_from_dbr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_IBR:
-		kvm_mov_from_ibr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_PMC:
-		kvm_mov_from_pmc(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_PKR:
-		kvm_mov_from_pkr(vcpu, inst);
-		break;
-	case EVENT_MOV_FROM_CPUID:
-		kvm_mov_from_cpuid(vcpu, inst);
-		break;
-	case EVENT_VMSW:
-		status = IA64_FAULT;
-		break;
-	default:
-		break;
-	};
-	/*Assume all status is NO_FAULT ?*/
-	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
-		vcpu_increment_iip(vcpu);
-
-	recover_if_physical_mode(vcpu);
-}
-
-void init_vcpu(struct kvm_vcpu *vcpu)
-{
-	int i;
-
-	vcpu->arch.mode_flags = GUEST_IN_PHY;
-	VMX(vcpu, vrr[0]) = 0x38;
-	VMX(vcpu, vrr[1]) = 0x38;
-	VMX(vcpu, vrr[2]) = 0x38;
-	VMX(vcpu, vrr[3]) = 0x38;
-	VMX(vcpu, vrr[4]) = 0x38;
-	VMX(vcpu, vrr[5]) = 0x38;
-	VMX(vcpu, vrr[6]) = 0x38;
-	VMX(vcpu, vrr[7]) = 0x38;
-	VCPU(vcpu, vpsr) = IA64_PSR_BN;
-	VCPU(vcpu, dcr) = 0;
-	/* pta.size must not be 0.  The minimum is 15 (32k) */
-	VCPU(vcpu, pta) = 15 << 2;
-	VCPU(vcpu, itv) = 0x10000;
-	VCPU(vcpu, itm) = 0;
-	VMX(vcpu, last_itc) = 0;
-
-	VCPU(vcpu, lid) = VCPU_LID(vcpu);
-	VCPU(vcpu, ivr) = 0;
-	VCPU(vcpu, tpr) = 0x10000;
-	VCPU(vcpu, eoi) = 0;
-	VCPU(vcpu, irr[0]) = 0;
-	VCPU(vcpu, irr[1]) = 0;
-	VCPU(vcpu, irr[2]) = 0;
-	VCPU(vcpu, irr[3]) = 0;
-	VCPU(vcpu, pmv) = 0x10000;
-	VCPU(vcpu, cmcv) = 0x10000;
-	VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
-	VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
-	update_vhpi(vcpu, NULL_VECTOR);
-	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */
-
-	for (i = 0; i < 4; i++)
-		VLSAPIC_INSVC(vcpu, i) = 0;
-}
-
-void kvm_init_all_rr(struct kvm_vcpu *vcpu)
-{
-	unsigned long psr;
-
-	local_irq_save(psr);
-
-	/* WARNING: not allow co-exist of both virtual mode and physical
-	 * mode in same region
-	 */
-
-	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
-	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
-
-	if (is_physical_mode(vcpu)) {
-		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
-			panic_vm(vcpu, "Machine Status conflicts!\n");
-
-		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
-		ia64_dv_serialize_data();
-		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
-		ia64_dv_serialize_data();
-	} else {
-		ia64_set_rr((VRN0 << VRN_SHIFT),
-				vcpu->arch.metaphysical_saved_rr0);
-		ia64_dv_serialize_data();
-		ia64_set_rr((VRN4 << VRN_SHIFT),
-				vcpu->arch.metaphysical_saved_rr4);
-		ia64_dv_serialize_data();
-	}
-	ia64_set_rr((VRN1 << VRN_SHIFT),
-			vrrtomrr(VMX(vcpu, vrr[VRN1])));
-	ia64_dv_serialize_data();
-	ia64_set_rr((VRN2 << VRN_SHIFT),
-			vrrtomrr(VMX(vcpu, vrr[VRN2])));
-	ia64_dv_serialize_data();
-	ia64_set_rr((VRN3 << VRN_SHIFT),
-			vrrtomrr(VMX(vcpu, vrr[VRN3])));
-	ia64_dv_serialize_data();
-	ia64_set_rr((VRN5 << VRN_SHIFT),
-			vrrtomrr(VMX(vcpu, vrr[VRN5])));
-	ia64_dv_serialize_data();
-	ia64_set_rr((VRN7 << VRN_SHIFT),
-			vrrtomrr(VMX(vcpu, vrr[VRN7])));
-	ia64_dv_serialize_data();
-	ia64_srlz_d();
-	ia64_set_psr(psr);
-}
-
-int vmm_entry(void)
-{
-	struct kvm_vcpu *v;
-	v = current_vcpu;
-
-	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
-						0, 0, 0, 0, 0, 0);
-	kvm_init_vtlb(v);
-	kvm_init_vhpt(v);
-	init_vcpu(v);
-	kvm_init_all_rr(v);
-	vmm_reset_entry();
-
-	return 0;
-}
-
-static void kvm_show_registers(struct kvm_pt_regs *regs)
-{
-	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
-
-	struct kvm_vcpu *vcpu = current_vcpu;
-	if (vcpu != NULL)
-		printk("vcpu 0x%p vcpu %d\n",
-		       vcpu, vcpu->vcpu_id);
-
-	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
-	       regs->cr_ipsr, regs->cr_ifs, ip);
-
-	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
-	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
-	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
-	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
-	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
-	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
-	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
-	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
-							regs->b6, regs->b7);
-	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
-	       regs->f6.u.bits[1], regs->f6.u.bits[0],
-	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
-	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
-	       regs->f8.u.bits[1], regs->f8.u.bits[0],
-	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
-	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
-	       regs->f10.u.bits[1], regs->f10.u.bits[0],
-	       regs->f11.u.bits[1], regs->f11.u.bits[0]);
-
-	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
-							regs->r2, regs->r3);
-	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
-							regs->r9, regs->r10);
-	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
-							regs->r12, regs->r13);
-	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
-							regs->r15, regs->r16);
-	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
-							regs->r18, regs->r19);
-	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
-							regs->r21, regs->r22);
-	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
-							regs->r24, regs->r25);
-	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
-							regs->r27, regs->r28);
-	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
-							regs->r30, regs->r31);
-
-}
-
-void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
-{
-	va_list args;
-	char buf[256];
-
-	struct kvm_pt_regs *regs = vcpu_regs(v);
-	struct exit_ctl_data *p = &v->arch.exit_data;
-	va_start(args, fmt);
-	vsnprintf(buf, sizeof(buf), fmt, args);
-	va_end(args);
-	printk(buf);
-	kvm_show_registers(regs);
-	p->exit_reason = EXIT_REASON_VM_PANIC;
-	vmm_transition(v);
-	/*Never to return*/
-	while (1);
-}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
deleted file mode 100644
index 988911b..0000000
--- a/arch/ia64/kvm/vcpu.h
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- *  vcpu.h: vcpu routines
- *  	Copyright (c) 2005, Intel Corporation.
- *  	Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- *  	Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
- *
- * 	Copyright (c) 2007, Intel Corporation.
- *  	Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
- *	Xiantao Zhang (xiantao.zhang@intel.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-
-#ifndef __KVM_VCPU_H__
-#define __KVM_VCPU_H__
-
-#include <asm/types.h>
-#include <asm/fpu.h>
-#include <asm/processor.h>
-
-#ifndef __ASSEMBLY__
-#include "vti.h"
-
-#include <linux/kvm_host.h>
-#include <linux/spinlock.h>
-
-typedef unsigned long IA64_INST;
-
-typedef union U_IA64_BUNDLE {
-	unsigned long i64[2];
-	struct { unsigned long template:5, slot0:41, slot1a:18,
-		slot1b:23, slot2:41; };
-	/* NOTE: following doesn't work because bitfields can't cross natural
-	   size boundaries
-	   struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
-} IA64_BUNDLE;
-
-typedef union U_INST64_A5 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
-		imm9d:9, s:1, major:4; };
-} INST64_A5;
-
-typedef union U_INST64_B4 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
-		wh:2, d:1, un1:1, major:4; };
-} INST64_B4;
-
-typedef union U_INST64_B8 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
-} INST64_B8;
-
-typedef union U_INST64_B9 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
-} INST64_B9;
-
-typedef union U_INST64_I19 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
-} INST64_I19;
-
-typedef union U_INST64_I26 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_I26;
-
-typedef union U_INST64_I27 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
-} INST64_I27;
-
-typedef union U_INST64_I28 { /* not privileged (mov from AR) */
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_I28;
-
-typedef union U_INST64_M28 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M28;
-
-typedef union U_INST64_M29 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M29;
-
-typedef union U_INST64_M30 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
-		x3:3, s:1, major:4; };
-} INST64_M30;
-
-typedef union U_INST64_M31 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M31;
-
-typedef union U_INST64_M32 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M32;
-
-typedef union U_INST64_M33 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M33;
-
-typedef union U_INST64_M35 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
-} INST64_M35;
-
-typedef union U_INST64_M36 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
-} INST64_M36;
-
-typedef union U_INST64_M37 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
-		i:1, major:4; };
-} INST64_M37;
-
-typedef union U_INST64_M41 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-} INST64_M41;
-
-typedef union U_INST64_M42 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M42;
-
-typedef union U_INST64_M43 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M43;
-
-typedef union U_INST64_M44 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
-} INST64_M44;
-
-typedef union U_INST64_M45 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
-} INST64_M45;
-
-typedef union U_INST64_M46 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
-		x3:3, un1:1, major:4; };
-} INST64_M46;
-
-typedef union U_INST64_M47 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
-} INST64_M47;
-
-typedef union U_INST64_M1{
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
-		x6:6, m:1, major:4; };
-} INST64_M1;
-
-typedef union U_INST64_M2{
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
-		x6:6, m:1, major:4; };
-} INST64_M2;
-
-typedef union U_INST64_M3{
-	IA64_INST inst;
-	struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
-		x6:6, s:1, major:4; };
-} INST64_M3;
-
-typedef union U_INST64_M4 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
-		x6:6, m:1, major:4; };
-} INST64_M4;
-
-typedef union U_INST64_M5 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
-		x6:6, s:1, major:4; };
-} INST64_M5;
-
-typedef union U_INST64_M6 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
-		x6:6, m:1, major:4; };
-} INST64_M6;
-
-typedef union U_INST64_M9 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
-		x6:6, m:1, major:4; };
-} INST64_M9;
-
-typedef union U_INST64_M10 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
-		x6:6, s:1, major:4; };
-} INST64_M10;
-
-typedef union U_INST64_M12 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
-		x6:6, m:1, major:4; };
-} INST64_M12;
-
-typedef union U_INST64_M15 {
-	IA64_INST inst;
-	struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
-		x6:6, s:1, major:4; };
-} INST64_M15;
-
-typedef union U_INST64 {
-	IA64_INST inst;
-	struct { unsigned long :37, major:4; } generic;
-	INST64_A5 A5;	/* used in build_hypercall_bundle only */
-	INST64_B4 B4;	/* used in build_hypercall_bundle only */
-	INST64_B8 B8;	/* rfi, bsw.[01] */
-	INST64_B9 B9;	/* break.b */
-	INST64_I19 I19;	/* used in build_hypercall_bundle only */
-	INST64_I26 I26;	/* mov register to ar (I unit) */
-	INST64_I27 I27;	/* mov immediate to ar (I unit) */
-	INST64_I28 I28;	/* mov from ar (I unit) */
-	INST64_M1  M1;	/* ld integer */
-	INST64_M2  M2;
-	INST64_M3  M3;
-	INST64_M4  M4;	/* st integer */
-	INST64_M5  M5;
-	INST64_M6  M6;	/* ldfd floating pointer 		*/
-	INST64_M9  M9;	/* stfd floating pointer		*/
-	INST64_M10 M10;	/* stfd floating pointer		*/
-	INST64_M12 M12;     /* ldfd pair floating pointer		*/
-	INST64_M15 M15;	/* lfetch + imm update			*/
-	INST64_M28 M28;	/* purge translation cache entry	*/
-	INST64_M29 M29;	/* mov register to ar (M unit)		*/
-	INST64_M30 M30;	/* mov immediate to ar (M unit)		*/
-	INST64_M31 M31;	/* mov from ar (M unit)			*/
-	INST64_M32 M32;	/* mov reg to cr			*/
-	INST64_M33 M33;	/* mov from cr				*/
-	INST64_M35 M35;	/* mov to psr				*/
-	INST64_M36 M36;	/* mov from psr				*/
-	INST64_M37 M37;	/* break.m				*/
-	INST64_M41 M41;	/* translation cache insert		*/
-	INST64_M42 M42;	/* mov to indirect reg/translation reg insert*/
-	INST64_M43 M43;	/* mov from indirect reg		*/
-	INST64_M44 M44;	/* set/reset system mask		*/
-	INST64_M45 M45;	/* translation purge			*/
-	INST64_M46 M46;	/* translation access (tpa,tak)		*/
-	INST64_M47 M47;	/* purge translation entry		*/
-} INST64;
-
-#define MASK_41 ((unsigned long)0x1ffffffffff)
-
-/* Virtual address memory attributes encoding */
-#define VA_MATTR_WB         0x0
-#define VA_MATTR_UC         0x4
-#define VA_MATTR_UCE        0x5
-#define VA_MATTR_WC         0x6
-#define VA_MATTR_NATPAGE    0x7
-
-#define PMASK(size)         (~((size) - 1))
-#define PSIZE(size)         (1UL<<(size))