Merge branches 'stable-3.0', 'ixp4xx-3.0', 'ixp4xx-local-3.0' and 'router-3.0' into build-3.0
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 841df7d2..b7a9a10 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -402,7 +402,7 @@
 	if (!buf)
 		return 1;
 
-	BUG_ON(buf->direction != dir);
+	BUG_ON(buf->direction != dir && buf->direction != DMA_BIDIRECTIONAL);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
@@ -431,7 +431,7 @@
 	if (!buf)
 		return 1;
 
-	BUG_ON(buf->direction != dir);
+	BUG_ON(buf->direction != dir && buf->direction != DMA_BIDIRECTIONAL);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 6f991c5..fe6dce9 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -80,6 +80,8 @@
 
 config MACH_GORAMO_MLR
 	bool "GORAMO Multi Link Router"
+	select I2C
+	select I2C_ALGOBIT
 	help
 	  Say 'Y' here if you want your kernel to support GORAMO
 	  MultiLink router.
@@ -181,6 +183,20 @@
 
 comment "IXP4xx Options"
 
+config IXP4XX_SUPPORT_425A0
+	bool "Support early IXP42x processors (stepping A0)"
+	default y
+	help
+	  Early IXP425 (and IXC1100) processors are missing several hardware
+	  features and require many work-arounds. With this option you will
+	  be able to run Linux on those old processors, at the cost of increased
+	  code size. Some features, like the hardware watchdog timer, will
+	  only be available on IXP42x stepping B0 or later processors.
+	  IXP43x, IXP45x and IXP46x CPUs are not affected by this option.
+
+	  If you don't plan to use IXP425 stepping A0 CPUs, say "N".
+	  If unsure, say "Y".
+
 config IXP4XX_INDIRECT_PCI
 	bool "Use indirect PCI memory access"
 	depends on PCI
@@ -207,6 +223,10 @@
 	  need to use the indirect method instead. If you don't know
 	  what you need, leave this option unselected.
 
+config ZONE_DMA_ALL_KERNEL
+	bool
+	default y
+
 config IXP4XX_QMGR
 	tristate "IXP4xx Queue Manager support"
 	help
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index e9a5893..bcfc6d0 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -37,12 +37,6 @@
 
 
 /*
- * IXP4xx PCI read function is dependent on whether we are 
- * running A0 or B0 (AppleGate) silicon.
- */
-int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
-
-/*
  * Base address for PCI regsiter region
  */
 unsigned long ixp4xx_pci_reg_base = 0;
@@ -95,49 +89,40 @@
 	return 0;
 }
 
-int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data)
+int ixp4xx_pci_read(u32 addr, u32 cmd, u32* data)
 {
 	unsigned long flags;
 	int retval = 0;
-	int i;
 
 	spin_lock_irqsave(&ixp4xx_pci_lock, flags);
 
 	*PCI_NP_AD = addr;
 
-	/* 
-	 * PCI workaround  - only works if NP PCI space reads have 
+#ifdef CONFIG_IXP4XX_SUPPORT_425A0
+	if (cpu_is_ixp42x_rev_a0()) {
+		int i;
+	/*
+	 * PCI workaround  - only works if NP PCI space reads have
 	 * no side effects!!! Read 8 times. last one will be good.
 	 */
-	for (i = 0; i < 8; i++) {
-		*PCI_NP_CBE = cmd;
-		*data = *PCI_NP_RDATA;
-		*data = *PCI_NP_RDATA;
+		for (i = 0; i < 8; i++) {
+			*PCI_NP_CBE = cmd;
+			*data = *PCI_NP_RDATA;
+			*data = *PCI_NP_RDATA;
+		}
+		goto out;
 	}
-
-	if(check_master_abort())
-		retval = 1;
-
-	spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-	return retval;
-}
-
-int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data)
-{
-	unsigned long flags;
-	int retval = 0;
-
-	spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-
-	*PCI_NP_AD = addr;
-
-	/* set up and execute the read */    
+#endif
+	/* set up and execute the read */
 	*PCI_NP_CBE = cmd;
 
 	/* the result of the read is now in NP_RDATA */
-	*data = *PCI_NP_RDATA; 
+	*data = *PCI_NP_RDATA;
 
-	if(check_master_abort())
+#ifdef CONFIG_IXP4XX_SUPPORT_425A0
+out:
+#endif
+	if (check_master_abort())
 		retval = 1;
 
 	spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
@@ -239,7 +224,7 @@
 	return 0xffffffff;
 }
 
-static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
+int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
 {
 	u32 n, byte_enables, addr, data;
 	u8 bus_num = bus->number;
@@ -262,7 +247,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int ixp4xx_pci_write_config(struct pci_bus *bus,  unsigned int devfn, int where, int size, u32 value)
+int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
 {
 	u32 n, byte_enables, addr, data;
 	u8 bus_num = bus->number;
@@ -344,20 +329,6 @@
 
 void __init ixp4xx_pci_preinit(void)
 {
-	unsigned long cpuid = read_cpuid_id();
-
-	/*
-	 * Determine which PCI read method to use.
-	 * Rev 0 IXP425 requires workaround.
-	 */
-	if (!(cpuid & 0xf) && cpu_is_ixp42x()) {
-		printk("PCI: IXP42x A0 silicon detected - "
-			"PCI Non-Prefetch Workaround Enabled\n");
-		ixp4xx_pci_read = ixp4xx_pci_read_errata;
-	} else
-		ixp4xx_pci_read = ixp4xx_pci_read_no_errata;
-
-
 	/* hook in our fault handler for PCI errors */
 	hook_fault_code(16+6, abort_handler, SIGBUS, 0,
 			"imprecise external abort");
@@ -404,6 +375,7 @@
 		 * Enable the IO window to be way up high, at 0xfffffc00
 		 */
 		local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01);
+		local_write_config(0x40, 4, 0x000080FF); /* No TRDY time limit */
 	} else {
 		printk("PCI: IXP4xx is target - No bus scan performed\n");
 	}
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 0777257..abf2b6e 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -64,6 +64,11 @@
 		.pfn		= __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
 		.length		= IXP4XX_PCI_CFG_REGION_SIZE,
 		.type		= MT_DEVICE
+	}, {	/* Queue Manager */
+		.virtual	= IXP4XX_QMGR_BASE_VIRT,
+		.pfn		= __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
+		.length		= IXP4XX_QMGR_REGION_SIZE,
+		.type		= MT_DEVICE
 	},
 #ifdef CONFIG_DEBUG_LL
 	{	/* Debug UART mapping */
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 3e8c0e3..f2eec09 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -5,18 +5,29 @@
 
 #include <linux/delay.h>
 #include <linux/hdlc.h>
-#include <linux/i2c-gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/serial_8250.h>
+#include <asm-generic/rtc.h>
 #include <asm/mach-types.h>
 #include <asm/system.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
 #include <asm/mach/pci.h>
 
+#define DEBUG_PCI 0
+#define DEBUG_MSR 0
+#define DEBUG_IRQ 0
+
+#define SLOT_CS5536		0x01	/* IDSEL = AD31 */
+#define  DEV_CS5536_SB		0
+#define  DEV_CS5536_OHCI	1
+#define  DEV_CS5536_EHCI	2
+#define  DEV_CS5536_IDE		3
 #define SLOT_ETHA		0x0B	/* IDSEL = AD21 */
 #define SLOT_ETHB		0x0C	/* IDSEL = AD20 */
 #define SLOT_MPCI		0x0D	/* IDSEL = AD19 */
@@ -26,7 +37,7 @@
 #define GPIO_SCL		0
 #define GPIO_SDA		1
 #define GPIO_STR		2
-#define GPIO_IRQ_NEC		3
+#define GPIO_IRQ_NEC_CS5536	3
 #define GPIO_IRQ_ETHA		4
 #define GPIO_IRQ_ETHB		5
 #define GPIO_HSS0_DCD_N		6
@@ -54,13 +65,16 @@
 #define CFG_ETH0_ADDRESS	0x40 /* 6 bytes */
 #define CFG_ETH1_ADDRESS	0x46 /* 6 bytes */
 #define CFG_REV			0x4C /* u32 */
+#define  CFG_REV_MULTILINK	1
+#define  CFG_REV_MICRO		2
+#define  CFG_REV_MULTILINK2	3
 #define CFG_SDRAM_SIZE		0x50 /* u32 */
 #define CFG_SDRAM_CONF		0x54 /* u32 */
 #define CFG_SDRAM_MODE		0x58 /* u32 */
 #define CFG_SDRAM_REFRESH	0x5C /* u32 */
 
 #define CFG_HW_BITS		0x60 /* u32 */
-#define  CFG_HW_USB_PORTS	0x00000007 /* 0 = no NEC chip, 1-5 = ports # */
+#define  CFG_HW_USB_PORTS	0x00000007 /* 0 = no chip, 1-5 = ports # */
 #define  CFG_HW_HAS_PCI_SLOT	0x00000008
 #define  CFG_HW_HAS_ETH0	0x00000010
 #define  CFG_HW_HAS_ETH1	0x00000020
@@ -69,30 +83,101 @@
 #define  CFG_HW_HAS_UART0	0x00000100
 #define  CFG_HW_HAS_UART1	0x00000200
 #define  CFG_HW_HAS_EEPROM	0x00000400
+#define  CFG_HW_HAS_IDE		0x00000800
+#define  CFG_HW_HAS_RTC		0x00001000
 
 #define FLASH_CMD_READ_ARRAY	0xFF
 #define FLASH_CMD_READ_ID	0x90
 #define FLASH_SER_OFF		0x102 /* 0x81 in 16-bit mode */
 
+#define CS5536_ADDRESS		(1 << (32 - SLOT_CS5536))
+
+/* Use IRQ numbers normally used by GPIO lines which are used as outputs */
+#define IRQ_CS5536_IDE		IRQ_IXP4XX_GPIO0 /* CS5536 IRQ 14 - hardwired */
+#define IRQ_CS5536_USB		IRQ_IXP4XX_GPIO1 /* CS5536 IRQ 15 - IRQ mapper*/
+#define IRQ_CS5536_CPU_BASE	IRQ_CS5536_IDE
+#define IRQ_CS5536_SLAVE_PIC_BASE (14 - 8) /* CS5536 IRQs used: 14 and 15 */
+
+#define RTC_CENTURY		14 /* offset in CMOS RAM space */
+
 static u32 hw_bits = 0xFFFFFFFD;    /* assume all hardware present */;
 static u8 control_value;
 
-static void set_scl(u8 value)
+static inline int has_nec(void)
 {
-	gpio_line_set(GPIO_SCL, !!value);
-	udelay(3);
+	return (system_rev == CFG_REV_MULTILINK) &&
+		(hw_bits & CFG_HW_USB_PORTS);
 }
 
-static void set_sda(u8 value)
+static inline int has_cs5536(void)
 {
-	gpio_line_set(GPIO_SDA, !!value);
-	udelay(3);
+	return (system_rev == CFG_REV_MULTILINK2) &&
+		(hw_bits & (CFG_HW_USB_PORTS | CFG_HW_HAS_IDE |
+			    CFG_HW_HAS_RTC));
 }
 
-static void set_str(u8 value)
+static inline int has_pci(void)
+{
+	return has_nec() || has_cs5536() || (hw_bits & CFG_HW_HAS_PCI_SLOT);
+}
+
+/* 2-wire I^2C (100 kHz) shared with 3-wire 74HC4094 */
+
+static inline void set_scl(int value)
+{
+	/* has pull-up on SCL */
+	gpio_line_config(GPIO_SCL, value ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT);
+}
+
+static inline void set_sda(int value)
+{
+	/* has pull-up on SDA */
+	gpio_line_config(GPIO_SDA, value ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT);
+}
+
+static void i2c_set_scl(void *data, int value)
+{
+	set_scl(value);
+}
+
+static void i2c_set_sda(void *data, int value)
+{
+	set_sda(value);
+}
+
+static int i2c_get_scl(void *data)
+{
+	int value;
+	gpio_line_get(GPIO_SCL, &value);
+	return value;
+}
+
+static int i2c_get_sda(void *data)
+{
+	int value;
+	gpio_line_get(GPIO_SDA, &value);
+	return value;
+}
+
+static struct i2c_algo_bit_data i2c_bit_data = {
+	.setscl    = i2c_set_scl,
+	.setsda    = i2c_set_sda,
+	.getscl    = i2c_get_scl,
+	.getsda    = i2c_get_sda,
+	.udelay    = 5,		/* 100 kHz */
+	.timeout   = HZ / 10,	/* 100 ms */
+};
+
+static struct i2c_adapter i2c_adapter = {
+	.owner     = THIS_MODULE,
+	.name      = "i2c-multilink",
+	.algo_data = &i2c_bit_data,
+	.class     = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+};
+
+static inline void set_str(int value)
 {
 	gpio_line_set(GPIO_STR, !!value);
-	udelay(3);
 }
 
 static inline void set_control(int line, int value)
@@ -104,99 +189,138 @@
 }
 
 
-static void output_control(void)
+static void output_control_nolock(void)
 {
 	int i;
 
-	gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT);
-	gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT);
-
 	for (i = 0; i < 8; i++) {
 		set_scl(0);
+		udelay(5);
 		set_sda(control_value & (0x80 >> i)); /* MSB first */
+		udelay(5);
 		set_scl(1);	/* active edge */
+		udelay(5);
 	}
 
 	set_str(1);
+	udelay(5);
 	set_str(0);
+	udelay(5);
 
 	set_scl(0);
+	udelay(5);
 	set_sda(1);		/* Be ready for START */
+	udelay(5);
 	set_scl(1);
+	udelay(5);
 }
 
+static void output_control(void)
+{
+	rt_mutex_lock(&i2c_adapter.bus_lock);
+	output_control_nolock();
+	rt_mutex_unlock(&i2c_adapter.bus_lock);
+}
 
-static void (*set_carrier_cb_tab[2])(void *pdev, int carrier);
+/* HSS */
+
+static struct hss {
+	spinlock_t lock;
+	void (*set_carrier_cb)(void *pdev, int carrier);
+	void *cb_pdev;
+} hss_tab[2];
 
 static int hss_set_clock(int port, unsigned int clock_type)
 {
 	int ctrl_int = port ? CONTROL_HSS1_CLK_INT : CONTROL_HSS0_CLK_INT;
 
-	switch (clock_type) {
-	case CLOCK_DEFAULT:
+	if (clock_type == CLOCK_DEFAULT)
+		clock_type = CLOCK_EXT;
+
+	switch (clock_type & CLOCK_TYPE_MASK) {
 	case CLOCK_EXT:
 		set_control(ctrl_int, 0);
 		output_control();
-		return CLOCK_EXT;
+		return clock_type;
 
 	case CLOCK_INT:
 		set_control(ctrl_int, 1);
 		output_control();
-		return CLOCK_INT;
+		return clock_type;
 
 	default:
 		return -EINVAL;
 	}
 }
 
+static int hss_carrier(int port)
+{
+	int i;
+	gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i);
+	return !i;		/* inverted */
+}
+
 static irqreturn_t hss_dcd_irq(int irq, void *pdev)
 {
-	int i, port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N));
-	gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i);
-	set_carrier_cb_tab[port](pdev, !i);
+	int port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N));
+	struct hss *hss = pdev;
+
+	spin_lock(&hss->lock);
+	if (hss->set_carrier_cb)
+		hss->set_carrier_cb(hss->cb_pdev, hss_carrier(port));
+	spin_unlock(&hss->lock);
+
 	return IRQ_HANDLED;
 }
 
-
-static int hss_open(int port, void *pdev,
-		    void (*set_carrier_cb)(void *pdev, int carrier))
+static void hss_open(int port, void *pdev, void (*set_carrier_cb)(void *pdev, int carrier))
 {
-	int i, irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hss_tab[port].lock, flags);
+	hss_tab[port].set_carrier_cb = set_carrier_cb;
+	hss_tab[port].cb_pdev = pdev;
+
+	set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 0);
+	gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 0);
+	spin_unlock_irqrestore(&hss_tab[port].lock, flags);
+
+	output_control();
+}
+
+static void hss_close(int port, void *pdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hss_tab[port].lock, flags);
+	hss_tab[port].set_carrier_cb = NULL;
+
+	set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1);
+	gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 1);
+	spin_unlock_irqrestore(&hss_tab[port].lock, flags);
+
+	output_control();
+}
+
+static int hss_setup(int port)
+{
+	int irq, err;
+
+	spin_lock_init(&hss_tab[port].lock);
 
 	if (!port)
 		irq = IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N);
 	else
 		irq = IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N);
 
-	gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i);
-	set_carrier_cb(pdev, !i);
-
-	set_carrier_cb_tab[!!port] = set_carrier_cb;
-
-	if ((i = request_irq(irq, hss_dcd_irq, 0, "IXP4xx HSS", pdev)) != 0) {
-		printk(KERN_ERR "ixp4xx_hss: failed to request IRQ%i (%i)\n",
-		       irq, i);
-		return i;
+	err = request_irq(irq, hss_dcd_irq, 0, "IXP4xx HSS", &hss_tab[port]);
+	if (err) {
+		printk(KERN_ERR "ixp4xx_hss: failed to request IRQ%i (%i)\n", irq, err);
+		return err;
 	}
-
-	set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 0);
-	output_control();
-	gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 0);
 	return 0;
 }
 
-static void hss_close(int port, void *pdev)
-{
-	free_irq(port ? IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N) :
-		 IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), pdev);
-	set_carrier_cb_tab[!!port] = NULL; /* catch bugs */
-
-	set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1);
-	output_control();
-	gpio_line_set(port ? GPIO_HSS1_RTS_N : GPIO_HSS0_RTS_N, 1);
-}
-
-
 /* Flash memory */
 static struct flash_platform_data flash_data = {
 	.map_name	= "cfi_probe",
@@ -215,20 +339,6 @@
 	.resource	= &flash_resource,
 };
 
-
-/* I^2C interface */
-static struct i2c_gpio_platform_data i2c_data = {
-	.sda_pin	= GPIO_SDA,
-	.scl_pin	= GPIO_SCL,
-};
-
-static struct platform_device device_i2c = {
-	.name		= "i2c-gpio",
-	.id		= 0,
-	.dev		= { .platform_data = &i2c_data },
-};
-
-
 /* IXP425 2 UART ports */
 static struct resource uart_resources[] = {
 	{
@@ -308,11 +418,13 @@
 		.set_clock	= hss_set_clock,
 		.open		= hss_open,
 		.close		= hss_close,
+		.get_carrier	= hss_carrier,
 		.txreadyq	= 34,
 	}, {
 		.set_clock	= hss_set_clock,
 		.open		= hss_open,
 		.close		= hss_close,
+		.get_carrier	= hss_carrier,
 		.txreadyq	= 35,
 	}
 };
@@ -329,6 +441,24 @@
 	}
 };
 
+/* CS5536 battery-backed RTC */
+static struct cmos_rtc_board_info rtc_plat = {
+	.rtc_century   = RTC_CENTURY,
+};
+
+static struct resource rtc_resource = {
+	.start = 0x70,
+	.end   = 0x71,
+	.flags = IORESOURCE_IO
+};
+
+static struct platform_device device_rtc = {
+	.name              = "rtc_cmos",
+	.num_resources     = 1,
+	.resource          = &rtc_resource,
+	.dev.platform_data = &rtc_plat,
+};
+
 
 static struct platform_device *device_tab[6] __initdata = {
 	&device_flash,		/* index 0 */
@@ -385,6 +515,29 @@
 		iounmap(flash);
 	}
 
+	gpio_line_config(GPIO_HSS0_RTS_N, IXP4XX_GPIO_OUT);
+	gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT);
+	gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN);
+	gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN);
+	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH);
+	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH);
+
+	gpio_line_set(GPIO_SCL, 0);
+	gpio_line_set(GPIO_SDA, 0);
+	gpio_line_config(GPIO_STR, IXP4XX_GPIO_OUT);
+	set_control(CONTROL_HSS0_DTR_N, 1);
+	set_control(CONTROL_HSS1_DTR_N, 1);
+	set_control(CONTROL_EEPROM_WC_N, 1);
+	set_control(CONTROL_PCI_RESET_N, 0);
+	output_control_nolock();
+
+	msleep(1);
+
+	set_control(CONTROL_PCI_RESET_N, 1);
+	output_control_nolock();
+
+	msleep(100);	      /* Wait for PCI devices to initialize */
+
 	switch (hw_bits & (CFG_HW_HAS_UART0 | CFG_HW_HAS_UART1)) {
 	case CFG_HW_HAS_UART0:
 		memset(&uart_data[1], 0, sizeof(uart_data[1]));
@@ -405,71 +558,606 @@
 	if (hw_bits & CFG_HW_HAS_ETH1)
 		device_tab[devices++] = &device_eth_tab[1]; /* max index 3 */
 
-	if (hw_bits & CFG_HW_HAS_HSS0)
+	if ((hw_bits & CFG_HW_HAS_HSS0) && !hss_setup(0))
 		device_tab[devices++] = &device_hss_tab[0]; /* max index 4 */
-	if (hw_bits & CFG_HW_HAS_HSS1)
+
+	if ((hw_bits & CFG_HW_HAS_HSS1) && !hss_setup(1))
 		device_tab[devices++] = &device_hss_tab[1]; /* max index 5 */
 
-	if (hw_bits & CFG_HW_HAS_EEPROM)
-		device_tab[devices++] = &device_i2c; /* max index 6 */
-
-	gpio_line_config(GPIO_SCL, IXP4XX_GPIO_OUT);
-	gpio_line_config(GPIO_SDA, IXP4XX_GPIO_OUT);
-	gpio_line_config(GPIO_STR, IXP4XX_GPIO_OUT);
-	gpio_line_config(GPIO_HSS0_RTS_N, IXP4XX_GPIO_OUT);
-	gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT);
-	gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN);
-	gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN);
-	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH);
-	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH);
-
-	set_control(CONTROL_HSS0_DTR_N, 1);
-	set_control(CONTROL_HSS1_DTR_N, 1);
-	set_control(CONTROL_EEPROM_WC_N, 1);
-	set_control(CONTROL_PCI_RESET_N, 1);
-	output_control();
-
-	msleep(1);	      /* Wait for PCI devices to initialize */
+	if (hw_bits & CFG_HW_HAS_RTC)
+		device_tab[devices++] = &device_rtc; /* max index 6 */
 
 	flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
 	flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
 
+	/* Make sure I^2C is initialized before loading HSS driver */
+	if (i2c_bit_add_numbered_bus(&i2c_adapter))
+		panic("FATAL: Unable to initialize I2C bus\n");
+
 	platform_add_devices(device_tab, devices);
 }
 
 
 #ifdef CONFIG_PCI
+union pci_config_space { /* little-endian */
+	struct {
+		__le16 vendor_id, device_id;
+		__le16 command, status;
+		u8 revision, class[3];
+		u8 cacheline, latency, header, bist;
+		__le32 bars[6];
+		__le32 cis_pointer;
+		__le16 sub_vendor_id, sub_device_id;
+		__le32 rom_bar;
+		u8 caps_pointer, res[7];
+		u8 irq, irq_pin, min_gnt, max_lat;
+		__le32 r40, r44, r48, r4c, r50;
+	} regs;
+	u8 regs8[0];
+	__le16 regs16[0];
+	__le32 regs32[0];
+};
+
+struct cs5536_pci_device {
+	union pci_config_space data;
+	union pci_config_space mask; /* 0 = read-only, 1 = read/write */
+};
+
+static struct cs5536_pci_device sb = {
+	.data.regs.vendor_id     = ~0,
+	.data.regs.device_id     = ~0,
+	.data.regs.class         = {0, 0x80, 6},
+	.data.regs.header        = 0x80,
+	/* mask - R/O */
+};
+
+static struct cs5536_pci_device ohci = {
+	.data.regs.vendor_id     = cpu_to_le16(PCI_VENDOR_ID_AMD),
+	.data.regs.device_id     = cpu_to_le16(PCI_DEVICE_ID_AMD_CS5536_OHC),
+	.data.regs.command       = cpu_to_le16(0x0006),
+	.data.regs.status        = cpu_to_le16(0x0230),
+	.data.regs.class         = {0x10, 0x03, 0x0C},
+	.data.regs.sub_vendor_id = cpu_to_le16(PCI_VENDOR_ID_AMD),
+	.data.regs.sub_device_id = cpu_to_le16(PCI_DEVICE_ID_AMD_CS5536_OHC),
+	.data.regs.caps_pointer  = 0x40,
+	.data.regs.irq_pin       = 1,
+	.data.regs.r40           = cpu_to_le32(0xC8020001), /* Capabilities */
+
+	.mask.regs.command       = ~0,
+	.mask.regs.cacheline     = ~0,
+	.mask.regs.latency       = ~0,
+	.mask.regs.bars[0]       = cpu_to_le32(0xFFFFF000), /* 4 KB (P2D desc)*/
+	.mask.regs.irq           = ~0,
+};
+
+static struct cs5536_pci_device ehci = {
+	.data.regs.vendor_id     = cpu_to_le16(PCI_VENDOR_ID_AMD),
+	.data.regs.device_id     = cpu_to_le16(PCI_DEVICE_ID_AMD_CS5536_EHC),
+	.data.regs.command       = cpu_to_le16(0x0006),
+	.data.regs.status        = cpu_to_le16(0x0230),
+	.data.regs.class         = {0x20, 0x03, 0x0C},
+	.data.regs.sub_vendor_id = cpu_to_le16(PCI_VENDOR_ID_AMD),
+	.data.regs.sub_device_id = cpu_to_le16(PCI_DEVICE_ID_AMD_CS5536_EHC),
+	.data.regs.caps_pointer  = 0x40,
+	.data.regs.irq_pin       = 1,
+	.data.regs.r40           = cpu_to_le32(0xC8020001), /* Capabilities */
+	/* mask */
+	.mask.regs.command       = ~0,
+	.mask.regs.cacheline     = ~0,
+	.mask.regs.latency       = ~0,
+	.mask.regs.bars[0]       = cpu_to_le32(0xFFFFF000), /* 4 KB (P2D desc)*/
+	.mask.regs.irq           = ~0,
+};
+
+static struct cs5536_pci_device ide = {
+	.data.regs.vendor_id     = cpu_to_le16(PCI_VENDOR_ID_AMD),
+	.data.regs.device_id     = cpu_to_le16(PCI_DEVICE_ID_AMD_CS5536_IDE),
+	.data.regs.command       = cpu_to_le16(0x0006),
+	.data.regs.status        = cpu_to_le16(0x0230),
+	.data.regs.class         = {0x85, 0x01, 0x01},
+	.data.regs.bars[4]       = cpu_to_le32(1), /* BM DMA registers */
+	.data.regs.sub_vendor_id = cpu_to_le16(PCI_VENDOR_ID_AMD),
+	.data.regs.sub_device_id = cpu_to_le16(PCI_DEVICE_ID_AMD_CS5536_IDE),
+	.data.regs.irq_pin       = 2,
+	/* mask */
+	.mask.regs.command       = ~0,
+	.mask.regs.cacheline     = ~0,
+	.mask.regs.latency       = ~0,
+	.mask.regs.bars[4]       = cpu_to_le32(0xFFFFFFF8), /* 8 bytes */
+	.mask.regs.irq           = ~0,
+	.mask.regs.r40           = ~0, /* IDE_CFG */
+	.mask.regs.r48           = ~0, /* IDE_DTC */
+	.mask.regs.r4c           = ~0, /* IDE_CAST */
+	.mask.regs.r50           = ~0, /* IDE_ETC */
+};
+
+/*
+ * Mask table, bits to mask for quantity of size 1, 2 or 4 bytes.
+ * 0 and 3 are not valid indexes...
+ */
+static const u32 bytemask[] = {
+	/*0*/	0,
+	/*1*/	0xff,
+	/*2*/	0xffff,
+	/*3*/	0,
+	/*4*/	0xffffffff,
+};
+
+static u8 cs5536_slave_irq_mask = 0xFF; /* IDE and USB only */
+
+static u32 msr_id(u32 msr)
+{
+	return ((msr << 9) & 0xFF800000) | (msr & 0x3FFF);
+}
+
+static void read_msr(u32 msr, u32 *h, u32 *l)
+{
+	if (ixp4xx_pci_write(CS5536_ADDRESS + 0xF4, NP_CMD_CONFIGWRITE,
+			     msr_id(msr)))
+		goto error;
+	if (ixp4xx_pci_read(CS5536_ADDRESS + 0xF8, NP_CMD_CONFIGREAD, l))
+		goto error;
+	if (ixp4xx_pci_read(CS5536_ADDRESS + 0xFC, NP_CMD_CONFIGREAD, h))
+		goto error;
+#if DEBUG_MSR
+	printk(KERN_DEBUG "read_msr %08X: %08X %08X\n", msr, *h, *l);
+#endif
+	return;
+error:
+	printk(KERN_CRIT "read_msr(0x%08X) failed\n", msr);
+}
+
+
+static void write_msr(u32 msr, u32 h, u32 l)
+{
+	if (ixp4xx_pci_write(CS5536_ADDRESS + 0xF4, NP_CMD_CONFIGWRITE,
+			     msr_id(msr)))
+		goto error;
+	if (ixp4xx_pci_write(CS5536_ADDRESS + 0xF8, NP_CMD_CONFIGWRITE, l))
+		goto error;
+	if (ixp4xx_pci_write(CS5536_ADDRESS + 0xFC, NP_CMD_CONFIGWRITE, h))
+		goto error;
+#if DEBUG_MSR
+	printk(KERN_DEBUG "write_msr %08X: %08X %08X\n", msr, h, l);
+#endif
+	return;
+error:
+	printk(KERN_CRIT "write_msr(0x%08X, 0x%08X, 0x%08X) failed\n",
+	       msr, h, l);
+}
+
+
+static inline void setup_cs5536_gpio(u16 value, u16 address)
+{
+	outl(((~(u32)value) << 16) | value, address);
+}
+
+static void cs5536_irq_ack(struct irq_data *data)
+{
+#if DEBUG_IRQ
+	printk(KERN_INFO "ACK %u GPIO %X\n",
+	       data->irq, !!(readb(IXP4XX_GPIO_BASE_VIRT + 0x0B) & 8));
+#endif
+}
+
+static void cs5536_irq_mask(struct irq_data *data)
+{
+#if DEBUG_IRQ
+	printk(KERN_INFO "MASK %u GPIO %X\n",
+	       data->irq, !!(readb(IXP4XX_GPIO_BASE_VIRT + 0x0B) & 8));
+#endif
+	cs5536_slave_irq_mask |= 1 << (data->irq - IRQ_CS5536_CPU_BASE +
+				       IRQ_CS5536_SLAVE_PIC_BASE);
+	outb(cs5536_slave_irq_mask, 0xA1);
+#if DEBUG_IRQ
+	printk(KERN_INFO "        GPIO %X\n",
+	       !!(readb(IXP4XX_GPIO_BASE_VIRT + 0x0B) & 8));
+#endif
+}
+
+static void cs5536_irq_unmask(struct irq_data *data)
+{
+#if DEBUG_IRQ
+	printk(KERN_INFO "UNMASK %u GPIO %X\n",
+	       data->irq, !!(readb(IXP4XX_GPIO_BASE_VIRT + 0x0B) & 8));
+#endif
+	cs5536_slave_irq_mask &= ~(1 << (data->irq - IRQ_CS5536_CPU_BASE +
+					 IRQ_CS5536_SLAVE_PIC_BASE));
+	outb(cs5536_slave_irq_mask, 0xA1);
+#if DEBUG_IRQ
+	printk(KERN_INFO "        GPIO %X\n",
+	       !!(readb(IXP4XX_GPIO_BASE_VIRT + 0x0B) & 8));
+#endif
+}
+
+static void cs5536_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	u32 h, l;
+
+#if DEBUG_IRQ
+	printk(KERN_INFO "HANDLER %u GPIO %X status %x\n",
+	       irq, !!(readb(IXP4XX_GPIO_BASE_VIRT + 0x0B) & 8), desc->status);
+#endif
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+	read_msr(0x51400027, &h, &l);
+	if (l & 0x40000000) {
+		struct irq_desc *d = irq_to_desc(IRQ_CS5536_USB);
+#if DEBUG_IRQ
+		printk(KERN_INFO "  status %X\n", d->status);
+		//BUG_ON (d->status & IRQ_INPROGRESS);
+#endif
+		d->handle_irq(IRQ_CS5536_USB, d);
+	}
+	if (l & 0x01000000) {
+		struct irq_desc *d = irq_to_desc(IRQ_CS5536_IDE);
+#if DEBUG_IRQ
+		printk(KERN_INFO "  status %X\n", d->status);
+#endif
+		d->handle_irq(IRQ_CS5536_IDE, d);
+	}
+}
+
 static void __init gmlr_pci_preinit(void)
 {
+	gpio_line_config(GPIO_IRQ_ETHA, IXP4XX_GPIO_IN);
+	gpio_line_config(GPIO_IRQ_ETHB, IXP4XX_GPIO_IN);
+	gpio_line_config(GPIO_IRQ_NEC_CS5536, IXP4XX_GPIO_IN);
+	gpio_line_config(GPIO_IRQ_MPCI, IXP4XX_GPIO_IN);
 	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW);
 	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW);
-	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW);
+	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC_CS5536), IRQ_TYPE_LEVEL_LOW);
 	irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW);
 	ixp4xx_pci_preinit();
 }
 
+static struct irq_chip cs5536_irqchip = {
+	.name = "CS5536",
+	.irq_ack = cs5536_irq_ack,
+	.irq_mask = cs5536_irq_mask,
+	.irq_unmask = cs5536_irq_unmask,
+};
+
 static void __init gmlr_pci_postinit(void)
 {
-	if ((hw_bits & CFG_HW_USB_PORTS) >= 2 &&
-	    (hw_bits & CFG_HW_USB_PORTS) < 5) {
+	if (has_nec() && (hw_bits & CFG_HW_USB_PORTS) < 5) {
 		/* need to adjust number of USB ports on NEC chip */
 		u32 value, addr = BIT(32 - SLOT_NEC) | 0xE0;
 		if (!ixp4xx_pci_read(addr, NP_CMD_CONFIGREAD, &value)) {
 			value &= ~7;
-			value |= (hw_bits & CFG_HW_USB_PORTS);
+			value |= hw_bits & CFG_HW_USB_PORTS;
 			ixp4xx_pci_write(addr, NP_CMD_CONFIGWRITE, value);
 		}
 	}
+
+	if (has_cs5536()) {
+		struct pci_dev *pci_dev;
+		u8 __iomem *ptr;
+		u32 h, l;
+
+/* GPIO */
+		/* FIXME 0x2000 */
+		write_msr(0x5140000C, 0xF001, 0x2000); /* GPIO at 0x2000 */
+		write_msr(0x510100E2, 0x80000002, 0x000fff00); /* 256 bytes */
+		/* GPIO1 = beeper (OUT AUX1)
+		   GPIO2 = IDE IRQ (IN AUX1)
+		   GPIO5 = IDE cable ID (IN) */
+		setup_cs5536_gpio(0x0002, 0x2004); /* OUT enable */
+		setup_cs5536_gpio(0x0002, 0x2010); /* OUT AUX1 */
+		setup_cs5536_gpio(0x0000, 0x2014); /* OUT AUX2 */
+		setup_cs5536_gpio(0xFFFB, 0x2018); /* pull-up enable */
+		setup_cs5536_gpio(0x0004, 0x201C); /* pull-down enable */
+		setup_cs5536_gpio(0x0024, 0x2020); /* IN enable */
+		setup_cs5536_gpio(0x0004, 0x2034); /* IN AUX1 */
+		setup_cs5536_gpio(0x0000, 0x20A0); /* IN enable */
+		setup_cs5536_gpio(0x0000, 0x20B4); /* IN AUX1 */
+
+/* USB */
+		read_msr(0x51200000, &h, &l);
+		ohci.data.regs.revision = ehci.data.regs.revision = l;
+
+		/* map and set USB option registers */
+		write_msr(0x5120000B, 2, PCIBIOS_MIN_MEM);
+		write_msr(0x51010020, 0x40000000 | PCIBIOS_MIN_MEM >> 24,
+			  (PCIBIOS_MIN_MEM << 8) | 0xFFFFF);
+		if (!(ptr = ioremap(PCIBIOS_MIN_MEM, 0x80)))
+			printk(KERN_CRIT "goramo-mlr: unable to access CS5536 "
+			       "PCI address space\n");
+		else {
+			/* assign USB port #4 to USB host controller */
+			writel((readl(ptr + 4) & ~3) | 2, ptr + 4);
+			iounmap(ptr);
+		}
+		/* reset maps */
+		write_msr(0x5120000B, 0, 0);
+		write_msr(0x51010020, 0x000000FF, 0xFFF00000);
+
+/* IDE */
+		read_msr(0x51300000, &h, &l);
+		ide.data.regs.revision = l;
+
+		read_msr(0x51400015, &h, &l);
+		write_msr(0x51400015, h, l | 1); /* IDE, not flash */
+
+		ide.data.regs.bars[0] = cpu_to_le32(0x1F1);
+		ide.data.regs.bars[1] = cpu_to_le32(0x3F7);
+		pci_dev = pci_get_bus_and_slot(0, PCI_DEVFN(SLOT_CS5536,
+							    DEV_CS5536_IDE));
+		if (pci_dev) {
+			pci_dev->resource[0].start = 0x1F0;
+			pci_dev->resource[0].end = 0x1F7;
+			pci_dev->resource[0].flags = IORESOURCE_IO |
+				IORESOURCE_PCI_FIXED;
+			pci_dev->resource[1].start = 0x3F6;
+			pci_dev->resource[1].end = 0x3F6;
+			pci_dev->resource[1].flags = IORESOURCE_IO |
+				IORESOURCE_PCI_FIXED;
+		}
+
+		l = 2;		/* channel enabled */
+		if (!(inl(0x2030) & 0x20))
+			l |= 0x30000; /* assume 80-wire cable */
+		write_msr(0x51300010, 0, l);
+		ide.data.regs32[0x10] = cpu_to_le32(l);
+		read_msr(0x51300012, &h, &l);
+		ide.data.regs32[0x12] = cpu_to_le32(l);
+		read_msr(0x51300013, &h, &l);
+		ide.data.regs32[0x13] = cpu_to_le32(l);
+		read_msr(0x51300014, &h, &l);
+		ide.data.regs32[0x14] = cpu_to_le32(l);
+
+/* RTC */
+		write_msr(0x51400057, 0, RTC_CENTURY);
+		outb(RTC_REG_D, 0x70);
+		if (!(inb(0x71) & 0x80))
+			printk(KERN_ERR "RTC: battery fault recorded\n");
+		/* Make sure we do 24 hrs BCD */
+		outb(RTC_REG_B, 0x70);
+		l = inb(0x71);
+		if ((l & (RTC_DM_BINARY | RTC_24H)) != RTC_24H)
+			outb((l & ~RTC_DM_BINARY) | RTC_24H, 0x71);
+
+/* Interrupts */
+		/* initialize CS5536 dual 8259A IRQ controller */
+		outb(0xFF, 0x21); /* mask all IRQs */
+		outb(0xFF, 0xA1);
+		outb(0x19, 0x20); /* ICW1 (master) level-triggered */
+		outb(0x00, 0x21); /* ICW2 */
+		outb(0x04, 0x21); /* ICW3 */
+		outb(0x01, 0x21); /* ICW4 */
+		outb(0x19, 0xA0); /* ICW1 (slave) level-triggered */
+		outb(0x00, 0xA1); /* ICW2 */
+		outb(0x02, 0xA1); /* ICW3 */
+		outb(0x01, 0xA1); /* ICW4 */
+
+		outb(0xFB, 0x21); /* mask all but cascade IRQ2 */
+		outb(0xFF, 0xA1);
+
+		write_msr(0x51400020, 0, 0xF00); /* USB uses IRQ15 (Y15) */
+		write_msr(0x51000010, 0x44000030, 0x00000013); /* CIS mode C */
+
+		irq_set_chip_and_handler(IRQ_CS5536_IDE, &cs5536_irqchip, handle_level_irq);
+		set_irq_flags(IRQ_CS5536_IDE, IRQF_VALID);
+		irq_set_status_flags(IRQ_CS5536_IDE, IRQ_LEVEL);
+
+		irq_set_chip_and_handler(IRQ_CS5536_USB, &cs5536_irqchip, handle_level_irq);
+		set_irq_flags(IRQ_CS5536_USB, IRQF_VALID);
+		irq_set_status_flags(IRQ_CS5536_USB, IRQ_LEVEL);
+
+		irq_set_status_flags(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC_CS5536), IRQ_LEVEL);
+		irq_set_chained_handler(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC_CS5536),
+					cs5536_irq_handler);
+	}
+}
+
+static int __init gmlr_pci_setup(int nr, struct pci_sys_data *sys)
+{
+	int res = ixp4xx_setup(nr, sys);
+	if (res) {
+		u32 v;
+		ixp4xx_pci_write(0, NP_CMD_IOWRITE, CS5536_ADDRESS); /* IDSEL */
+		ixp4xx_pci_read(CS5536_ADDRESS, NP_CMD_CONFIGREAD, &v);
+		sb.data.regs32[0] = cpu_to_le32(v); /* vendor and device ID */
+		ixp4xx_pci_read(CS5536_ADDRESS + 8, NP_CMD_CONFIGREAD, &v);
+		sb.data.regs.revision = v;
+	}
+	return res;
 }
 
 static int __init gmlr_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
-	switch(slot) {
-	case SLOT_ETHA:	return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA);
-	case SLOT_ETHB:	return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB);
-	case SLOT_NEC:	return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC);
-	default:	return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI);
+	switch (slot) {
+	case SLOT_CS5536:
+		break;
+	case SLOT_ETHA:
+		return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA);
+	case SLOT_ETHB:
+		return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB);
+	case SLOT_NEC:
+		return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC_CS5536);
+	default:
+		return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI);
 	}
+
+	switch (pin) {
+	case 1:
+		return IRQ_CS5536_USB;
+	case 2:
+		return IRQ_CS5536_IDE;
+	default:
+		return -1;
+	}
+}
+
+static struct cs5536_pci_device* cs5536_get_dev(unsigned int devfn)
+{
+	if (PCI_FUNC(devfn) == DEV_CS5536_SB)
+		return &sb;
+	else if (PCI_FUNC(devfn) == DEV_CS5536_OHCI &&
+		 (hw_bits & CFG_HW_USB_PORTS))
+		return &ohci;
+	else if (PCI_FUNC(devfn) == DEV_CS5536_EHCI &&
+		 (hw_bits & CFG_HW_USB_PORTS))
+		return &ehci;
+	else if (PCI_FUNC(devfn) == DEV_CS5536_IDE &&
+		 (hw_bits & CFG_HW_HAS_IDE))
+		return &ide;
+	else
+		return NULL;
+}
+
+static int cs5536_pci_read(unsigned int devfn, int where, int len,
+			   uint32_t *value)
+{
+	struct cs5536_pci_device *device;
+
+	if (!(device = cs5536_get_dev(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (where >= sizeof(device->data)) {
+		*value = 0;
+		return 0;	/* nothing there */
+	}
+
+	switch (len) {
+	case 1:
+		*value = device->data.regs8[where];
+		break;
+	case 2:
+		*value = le16_to_cpu(device->data.regs16[where >> 1]);
+		break;
+	case 4:
+		*value = le32_to_cpu(device->data.regs32[where >> 2]);
+		break;
+	default:
+		BUG();
+	}
+
+#if DEBUG_PCI
+	printk(KERN_INFO "cs5536_pci_read from %X size %X dev 0:%X:%X -> %X\n",
+	       where, len, PCI_SLOT(devfn), PCI_FUNC(devfn), *value);
+#endif
+	return 0;
+}
+
+static int cs5536_pci_write(unsigned int devfn, int where, int len,
+			    uint32_t value)
+{
+	struct cs5536_pci_device *device;
+	__le32 mask;
+
+	if (!(device = cs5536_get_dev(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+#if DEBUG_PCI
+	printk(KERN_INFO "cs5536_pci_write to %X size %X value %X dev 0:"
+	       "%X:%X\n", where, len, value, PCI_SLOT(devfn), PCI_FUNC(devfn));
+#endif
+
+	if (where >= sizeof(device->data))
+		return 0;	/* nothing there */
+
+	switch (len) {
+	case 1:
+		mask = device->mask.regs8[where];
+		value &= mask;
+
+		device->data.regs8[where] &= ~mask;
+		device->data.regs8[where] |= value;
+		break;
+	case 2:
+		where &= ~1;
+		mask = device->mask.regs16[where >> 1]; /* little-endian */
+		value &= le16_to_cpu(mask);
+
+		device->data.regs16[where >> 1] &= ~mask;
+		device->data.regs16[where >> 1] |= cpu_to_le16(value);
+		break;
+	case 4:
+		where &= ~3;
+		mask = device->mask.regs32[where >> 2]; /* little-endian */
+		value &= le32_to_cpu(mask);
+
+		device->data.regs32[where >> 2] &= ~mask;
+		device->data.regs32[where >> 2] |= cpu_to_le32(value);
+		break;
+	default:
+		BUG();
+	}
+
+	if (len == 4 && where == 0x10) { /* write to BAR0 */
+		switch (PCI_FUNC(devfn)) {
+		case DEV_CS5536_OHCI:
+			/* USB OHCI base address MSR */
+			write_msr(0x51200008, 6, value);
+			/* P2D descriptor for USB OHCI */
+			write_msr(0x51010020, 0x40000000 | value >> 24,
+				  (value << 8) | 0xFFFFF);
+			break;
+
+		case DEV_CS5536_EHCI:
+			/* USB EHCI base address MSR */
+			write_msr(0x51200009, 0x2006, value);
+			/* P2D descriptor for USB EHCI */
+			write_msr(0x51010021, 0x40000000 | value >> 24,
+				  (value << 8) | 0xFFFFF);
+			break;
+		}
+		return 0;
+	}
+
+	if (PCI_FUNC(devfn) == DEV_CS5536_IDE && len == 4)
+		switch (where) {
+		case 0x20: /* BAR4 */
+			/* Bus mastering IDE base address MSR - 20-bit */
+			write_msr(0x51300008, 0, value);
+			/* IOD descriptor for IDE */
+			write_msr(0x510100E1, 0x60000000 | value >> 12,
+				  (value << 20) | 0xFFFF8);
+			break;
+		case 0x40:
+			write_msr(0x51300010, 0, value);
+			break;
+		case 0x48:
+			write_msr(0x51300012, 0, value);
+			break;
+		case 0x4C:
+			write_msr(0x51300013, 0, value);
+			break;
+		case 0x50:
+			write_msr(0x51300014, 0, value);
+			break;
+		}
+
+	return 0;
+}
+
+static int gmlr_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *value)
+{
+	if (!bus->number && PCI_SLOT(devfn) == SLOT_CS5536 &&
+	    (PCI_FUNC(devfn) != DEV_CS5536_SB || where < 0x10))
+		return cs5536_pci_read(devfn, where, size, value);
+
+	return ixp4xx_pci_read_config(bus, devfn, where, size, value);
+}
+
+static int gmlr_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 value)
+{
+	if (!bus->number && PCI_SLOT(devfn) == SLOT_CS5536 &&
+	    (PCI_FUNC(devfn) != DEV_CS5536_SB || where < 0x10))
+		return cs5536_pci_write(devfn, where, size, value);
+
+	return ixp4xx_pci_write_config(bus, devfn, where, size, value);
+}
+
+struct pci_ops gmlr_ops = {
+	.read =  gmlr_pci_read_config,
+	.write = gmlr_pci_write_config,
+};
+
+struct pci_bus *gmlr_scan_bus(int nr, struct pci_sys_data *sys)
+{
+	return pci_scan_bus(sys->busnr, &gmlr_ops, sys);
 }
 
 static struct hw_pci gmlr_hw_pci __initdata = {
@@ -477,15 +1165,14 @@
 	.preinit	= gmlr_pci_preinit,
 	.postinit	= gmlr_pci_postinit,
 	.swizzle	= pci_std_swizzle,
-	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
+	.setup		= gmlr_pci_setup,
+	.scan		= gmlr_scan_bus,
 	.map_irq	= gmlr_map_irq,
 };
 
 static int __init gmlr_pci_init(void)
 {
-	if (machine_is_goramo_mlr() &&
-	    (hw_bits & (CFG_HW_USB_PORTS | CFG_HW_HAS_PCI_SLOT)))
+	if (machine_is_goramo_mlr() && has_pci())
 		pci_common_init(&gmlr_hw_pci);
 	return 0;
 }
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 57b5410..488677f 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -19,9 +19,10 @@
 
 #define IO_SPACE_LIMIT 0x0000ffff
 
-extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
+extern int ixp4xx_pci_read(u32 addr, u32 cmd, u32* data);
 extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
-
+int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value);
+int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value);
 
 /*
  * IXP4xx provides two methods of accessing PCI memory space:
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
index 97c530f..2b5f27f 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
@@ -30,19 +30,20 @@
  *
  * 0x50000000	0x10000000	ioremap'd	EXP BUS
  *
- * 0x6000000	0x00004000	ioremap'd	QMgr
+ * 0x60000000	0x00004000	0xffbe7000	QMgr
  *
- * 0xC0000000	0x00001000	0xffbff000	PCI CFG
+ * 0xC8000000	0x00013000	0xffbeb000	On-Chip Peripherals
  *
  * 0xC4000000	0x00001000	0xffbfe000	EXP CFG
  *
- * 0xC8000000	0x00013000	0xffbeb000	On-Chip Peripherals
+ * 0xC0000000	0x00001000	0xffbff000	PCI CFG
  */
 
 /*
  * Queue Manager
  */
 #define IXP4XX_QMGR_BASE_PHYS		(0x60000000)
+#define IXP4XX_QMGR_BASE_VIRT		(0xFFBE7000)
 #define IXP4XX_QMGR_REGION_SIZE		(0x00004000)
 
 /*
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index e824c02..4e25a49 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -106,9 +106,9 @@
 /* Information about built-in HSS (synchronous serial) interfaces */
 struct hss_plat_info {
 	int (*set_clock)(int port, unsigned int clock_type);
-	int (*open)(int port, void *pdev,
-		    void (*set_carrier_cb)(void *pdev, int carrier));
+	void (*open)(int port, void *pdev, void (*set_carrier_cb)(void *pdev, int carrier));
 	void (*close)(int port, void *pdev);
+	int (*get_carrier)(int port);
 	u8 txreadyq;
 };
 
diff --git a/arch/arm/mach-ixp4xx/include/mach/qmgr.h b/arch/arm/mach-ixp4xx/include/mach/qmgr.h
index 9e7cad2..0a88d3b 100644
--- a/arch/arm/mach-ixp4xx/include/mach/qmgr.h
+++ b/arch/arm/mach-ixp4xx/include/mach/qmgr.h
@@ -86,7 +86,7 @@
 
 static inline void qmgr_put_entry(unsigned int queue, u32 val)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 #if DEBUG_QMGR
 	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
 
@@ -99,7 +99,7 @@
 static inline u32 qmgr_get_entry(unsigned int queue)
 {
 	u32 val;
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 	val = __raw_readl(&qmgr_regs->acc[queue][0]);
 #if DEBUG_QMGR
 	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
@@ -112,14 +112,14 @@
 
 static inline int __qmgr_get_stat1(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 	return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
 		>> ((queue & 7) << 2)) & 0xF;
 }
 
 static inline int __qmgr_get_stat2(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 	BUG_ON(queue >= HALF_QUEUES);
 	return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
 		>> ((queue & 0xF) << 1)) & 0x3;
@@ -145,7 +145,7 @@
  */
 static inline int qmgr_stat_below_low_watermark(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 	if (queue >= HALF_QUEUES)
 		return (__raw_readl(&qmgr_regs->statne_h) >>
 			(queue - HALF_QUEUES)) & 0x01;
@@ -172,7 +172,7 @@
  */
 static inline int qmgr_stat_full(unsigned int queue)
 {
-	extern struct qmgr_regs __iomem *qmgr_regs;
+	const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 	if (queue >= HALF_QUEUES)
 		return (__raw_readl(&qmgr_regs->statf_h) >>
 			(queue - HALF_QUEUES)) & 0x01;
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
index 852f7c9..af759cd 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
@@ -14,7 +14,7 @@
 #include <linux/module.h>
 #include <mach/qmgr.h>
 
-struct qmgr_regs __iomem *qmgr_regs;
+static const struct qmgr_regs __iomem *qmgr_regs = (void __iomem *)IXP4XX_QMGR_BASE_VIRT;
 static struct resource *mem_res;
 static spinlock_t qmgr_lock;
 static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
@@ -32,7 +32,7 @@
 
 	spin_lock_irqsave(&qmgr_lock, flags);
 	if (queue < HALF_QUEUES) {
-		u32 __iomem *reg;
+		const u32 __iomem *reg;
 		int bit;
 		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
 		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
@@ -48,6 +48,7 @@
 	spin_unlock_irqrestore(&qmgr_lock, flags);
 }
 
+#ifdef CONFIG_IXP4XX_SUPPORT_425A0
 
 static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
 {
@@ -91,7 +92,7 @@
 	}
 	return ret;
 }
-
+#endif /* CONFIG_IXP4XX_SUPPORT_425A0 */
 
 static irqreturn_t qmgr_irq(int irq, void *pdev)
 {
@@ -293,12 +294,6 @@
 	if (mem_res == NULL)
 		return -EBUSY;
 
-	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
-	if (qmgr_regs == NULL) {
-		err = -ENOMEM;
-		goto error_map;
-	}
-
 	/* reset qmgr registers */
 	for (i = 0; i < 4; i++) {
 		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
@@ -316,10 +311,12 @@
 	for (i = 0; i < QUEUES; i++)
 		__raw_writel(0, &qmgr_regs->sram[i]);
 
+#ifdef CONFIG_IXP4XX_SUPPORT_425A0
 	if (cpu_is_ixp42x_rev_a0()) {
 		handler1 = qmgr_irq1_a0;
 		handler2 = qmgr_irq2_a0;
 	} else
+#endif
 		handler1 = handler2 = qmgr_irq;
 
 	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
@@ -347,8 +344,6 @@
 error_irq2:
 	free_irq(IRQ_IXP4XX_QM1, NULL);
 error_irq:
-	iounmap(qmgr_regs);
-error_map:
 	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
 	return err;
 }
@@ -359,7 +354,6 @@
 	free_irq(IRQ_IXP4XX_QM2, NULL);
 	synchronize_irq(IRQ_IXP4XX_QM1);
 	synchronize_irq(IRQ_IXP4XX_QM2);
-	iounmap(qmgr_regs);
 	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
 }
 
@@ -369,7 +363,6 @@
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Krzysztof Halasa");
 
-EXPORT_SYMBOL(qmgr_regs);
 EXPORT_SYMBOL(qmgr_set_irq);
 EXPORT_SYMBOL(qmgr_enable_irq);
 EXPORT_SYMBOL(qmgr_disable_irq);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093c..f9c9a6d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -406,7 +406,7 @@
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	WARN_ON(irqs_disabled());
+	// WARN_ON(irqs_disabled()); FIXME
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index b42edaa..957d555 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -766,7 +766,22 @@
 	if (rc)
 		return rc;
 
+#ifdef CONFIG_ARCH_IXP4XX
+	/* IXP4xx CPUs can't perform 8 and 16-bit MMIO reads,
+	   use normal IO from/to regions 0-5 instead.
+	   region 0: channel 0 (and 2) task file regs
+	   region 1: channel 0 (and 2) auxiliary status
+	   region 2: channel 1 (and 3) task file regs
+	   region 3: channel 1 (and 3) auxiliary status
+	   region 4: bus master DMA command and status for all channels
+	   region 5: the normal MMIO
+
+	   Channels 2 and 3 are present only on SIL3114, device selection
+	   is done with ATA_DEV1 bit in ATA_REG_DEVICE. FIXME - untested */
+	rc = pcim_iomap_regions(pdev, 0x3F, DRV_NAME);
+#else
 	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
+#endif
 	if (rc == -EBUSY)
 		pcim_pin_device(pdev);
 	if (rc)
@@ -786,10 +801,16 @@
 		struct ata_port *ap = host->ports[i];
 		struct ata_ioports *ioaddr = &ap->ioaddr;
 
+#ifdef CONFIG_ARCH_IXP4XX
+		ioaddr->cmd_addr = host->iomap[(i % 2) * 2];
+		ioaddr->altstatus_addr = host->iomap[1 + (i % 2) * 2] + 2;
+		ioaddr->bmdma_addr = host->iomap[4] + sil_port[i % 2].bmdma;
+#else
 		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
-		ioaddr->altstatus_addr =
-		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
+		ioaddr->altstatus_addr = mmio_base + sil_port[i].ctl;
 		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
+#endif
+		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
 		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
 		ata_sff_std_ports(ioaddr);
 
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a60043b..0147fa1 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -100,12 +100,12 @@
 	  If unsure, say Y.
 
 config HW_RANDOM_IXP4XX
-	tristate "Intel IXP4xx NPU HW Random Number Generator support"
+	tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
 	depends on HW_RANDOM && ARCH_IXP4XX
 	default HW_RANDOM
 	---help---
-	  This driver provides kernel-side support for the Random
-	  Number Generator hardware found on the Intel IXP4xx NPU.
+	  This driver provides kernel-side support for the Pseudo-Random
+	  Number Generator hardware found on the Intel IXP45x/46x NPU.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ixp4xx-rng.
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index 263567f..beec162 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -45,6 +45,9 @@
 	void __iomem * rng_base;
 	int err;
 
+	if (!cpu_is_ixp46x()) /* includes IXP455 */
+		return -ENOSYS;
+
 	rng_base = ioremap(0x70002100, 4);
 	if (!rng_base)
 		return -ENOMEM;
@@ -68,5 +71,5 @@
 module_exit(ixp4xx_rng_exit);
 
 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for IXP4xx");
+MODULE_DESCRIPTION("H/W Pseudo-Random Number Generator (RNG) driver for IXP45x/46x");
 MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e0b25de..28659be 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -230,7 +230,7 @@
 
 config CRYPTO_DEV_IXP4XX
 	tristate "Driver for IXP4xx crypto hardware acceleration"
-	depends on ARCH_IXP4XX
+	depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
 	select CRYPTO_DES
 	select CRYPTO_ALGAPI
 	select CRYPTO_AUTHENC
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 4c20c5b..4dd349c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -65,7 +65,7 @@
 
 #define MOD_DES     0x0000
 #define MOD_TDEA2   0x0100
-#define MOD_3DES   0x0200
+#define MOD_3DES    0x0200
 #define MOD_AES     0x0800
 #define MOD_AES128  (0x0800 | KEYLEN_128)
 #define MOD_AES192  (0x0900 | KEYLEN_192)
@@ -105,7 +105,7 @@
 	u16 buf_len;
 #endif
 	u32 phys_addr;
-	u32 __reserved[4];
+	u32 __reserved[5];
 	struct buffer_desc *next;
 	enum dma_data_direction dir;
 };
@@ -121,7 +121,7 @@
 	u8 mode;		/* NPE_OP_*  operation mode */
 #endif
 	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
-	u32 icv_rev_aes;	/* icv or rev aes */
+	u32 icv_rev_aes;	/* address to store icv or rev aes */
 	u32 src_buf;
 	u32 dst_buf;
 #ifdef __ARMEB__
@@ -138,7 +138,7 @@
 	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
 	u32 crypto_ctx;		/* NPE Crypto Param structure address */
 
-	/* Used by Host: 4*4 bytes*/
+	/* Used only by host: 4 * 4 bytes */
 	unsigned ctl_flags;
 	union {
 		struct ablkcipher_request *ablk_req;
@@ -209,10 +209,10 @@
 };
 
 static struct npe *npe_c;
-static struct dma_pool *buffer_pool = NULL;
-static struct dma_pool *ctx_pool = NULL;
+static struct dma_pool *buffer_pool;
+static struct dma_pool *ctx_pool;
 
-static struct crypt_ctl *crypt_virt = NULL;
+static struct crypt_ctl *crypt_virt;
 static dma_addr_t crypt_phys;
 
 static int support_aes = 1;
@@ -247,12 +247,12 @@
 
 static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
 {
-	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
+	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
 }
 
 static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
 {
-	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
+	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
 }
 
 static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
@@ -276,28 +276,27 @@
 static struct crypt_ctl *get_crypt_desc(void)
 {
 	int i;
-	static int idx = 0;
+	static int idx;
 	unsigned long flags;
+	struct crypt_ctl *desc = NULL;
 
 	spin_lock_irqsave(&desc_lock, flags);
 
 	if (unlikely(!crypt_virt))
 		setup_crypt_desc();
-	if (unlikely(!crypt_virt)) {
-		spin_unlock_irqrestore(&desc_lock, flags);
-		return NULL;
-	}
+	if (unlikely(!crypt_virt))
+		goto out;
+
 	i = idx;
 	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 		if (++idx >= NPE_QLEN)
 			idx = 0;
 		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
-		spin_unlock_irqrestore(&desc_lock, flags);
-		return crypt_virt +i;
-	} else {
-		spin_unlock_irqrestore(&desc_lock, flags);
-		return NULL;
+		desc = crypt_virt + i;
 	}
+out:
+	spin_unlock_irqrestore(&desc_lock, flags);
+	return desc;
 }
 
 static spinlock_t emerg_lock;
@@ -320,15 +319,13 @@
 		if (++idx >= NPE_QLEN_TOTAL)
 			idx = NPE_QLEN;
 		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
-		spin_unlock_irqrestore(&emerg_lock, flags);
-		return crypt_virt +i;
-	} else {
-		spin_unlock_irqrestore(&emerg_lock, flags);
-		return NULL;
+		desc = crypt_virt + i;
 	}
+	spin_unlock_irqrestore(&emerg_lock, flags);
+	return desc;
 }
 
-static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf, u32 phys)
 {
 	while (buf) {
 		struct buffer_desc *buf1;
@@ -353,10 +350,9 @@
 	int authsize = crypto_aead_authsize(tfm);
 	int decryptlen = req->cryptlen - authsize;
 
-	if (req_ctx->encrypt) {
+	if (req_ctx->encrypt)
 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
 			req->src, decryptlen, authsize, 1);
-	}
 	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 }
 
@@ -376,9 +372,8 @@
 		struct aead_ctx *req_ctx = aead_request_ctx(req);
 
 		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-		if (req_ctx->hmac_virt) {
+		if (req_ctx->hmac_virt)
 			finish_scattered_hmac(crypt);
-		}
 		req->base.complete(&req->base, failed);
 		break;
 	}
@@ -386,9 +381,8 @@
 		struct ablkcipher_request *req = crypt->data.ablk_req;
 		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 
-		if (req_ctx->dst) {
+		if (req_ctx->dst)
 			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-		}
 		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 		req->base.complete(&req->base, failed);
 		break;
@@ -403,7 +397,7 @@
 		break;
 	case CTL_FLAG_GEN_REVAES:
 		ctx = crypto_tfm_ctx(crypt->data.tfm);
-		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+		*(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
 		if (atomic_dec_and_test(&ctx->configuring))
 			complete(&ctx->completion);
 		break;
@@ -422,7 +416,7 @@
 {
 	int i;
 
-	for(i=0; i<4; i++) {
+	for (i = 0; i < 4; i++) {
 		dma_addr_t phys = qmgr_get_entry(RECV_QID);
 		if (!phys)
 			return;
@@ -436,8 +430,8 @@
 	int ret = -ENODEV;
 	u32 msg[2] = { 0, 0 };
 
-	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
-				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
+	if (!(ixp4xx_read_feature_bits() &
+	      (IXP4XX_FEATURE_HASH | IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
 		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
 		return ret;
 	}
@@ -447,18 +441,13 @@
 
 	if (!npe_running(npe_c)) {
 		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
-		if (ret) {
+		if (ret)
 			return ret;
-		}
-		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
-			goto npe_error;
-	} else {
-		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
-			goto npe_error;
+	} else if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+		goto npe_error;
 
-		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
-			goto npe_error;
-	}
+	if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+		goto npe_error;
 
 	switch ((msg[1]>>16) & 0xff) {
 	case 3:
@@ -482,14 +471,12 @@
 	buffer_pool = dma_pool_create("buffer", dev,
 			sizeof(struct buffer_desc), 32, 0);
 	ret = -ENOMEM;
-	if (!buffer_pool) {
+	if (!buffer_pool)
 		goto err;
-	}
 	ctx_pool = dma_pool_create("context", dev,
 			NPE_CTX_LEN, 16, 0);
-	if (!ctx_pool) {
+	if (!ctx_pool)
 		goto err;
-	}
 	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
 				 "ixp_crypto:out", NULL);
 	if (ret)
@@ -531,11 +518,10 @@
 
 	npe_release(npe_c);
 
-	if (crypt_virt) {
+	if (crypt_virt)
 		dma_free_coherent(dev,
-			NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
+			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
 			crypt_virt, crypt_phys);
-	}
 	return;
 }
 
@@ -549,9 +535,8 @@
 static int init_sa_dir(struct ix_sa_dir *dir)
 {
 	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
-	if (!dir->npe_ctx) {
+	if (!dir->npe_ctx)
 		return -ENOMEM;
-	}
 	reset_sa_dir(dir);
 	return 0;
 }
@@ -572,9 +557,8 @@
 	if (ret)
 		return ret;
 	ret = init_sa_dir(&ctx->decrypt);
-	if (ret) {
+	if (ret)
 		free_sa_dir(&ctx->encrypt);
-	}
 	return ret;
 }
 
@@ -625,9 +609,8 @@
 
 	memcpy(pad, key, key_len);
 	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
-	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
+	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
 		pad[i] ^= xpad;
-	}
 
 	crypt->data.tfm = tfm;
 	crypt->regist_ptr = pad;
@@ -642,7 +625,7 @@
 	crypt->init_len = init_len;
 	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
 
-	buf->next = 0;
+	buf->next = NULL;
 	buf->buf_len = HMAC_PAD_BLOCKLEN;
 	buf->pkt_len = 0;
 	buf->phys_addr = pad_phys;
@@ -669,11 +652,11 @@
 	algo = ix_hash(tfm);
 
 	/* write cfg word to cryptinfo */
-	cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
+	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
 #ifndef __ARMEB__
 	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
 #endif
-	*(u32*)cinfo = cpu_to_be32(cfgword);
+	*(__be32 *)cinfo = cpu_to_be32(cfgword);
 	cinfo += sizeof(cfgword);
 
 	/* write ICV to cryptinfo */
@@ -707,10 +690,9 @@
 	struct ix_sa_dir *dir = &ctx->decrypt;
 
 	crypt = get_crypt_desc_emerg();
-	if (!crypt) {
+	if (!crypt)
 		return -EAGAIN;
-	}
-	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+	*(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
 
 	crypt->data.tfm = tfm;
 	crypt->crypt_offs = 0;
@@ -744,49 +726,46 @@
 	if (encrypt) {
 		cipher_cfg = cipher_cfg_enc(tfm);
 		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
-	} else {
+	} else
 		cipher_cfg = cipher_cfg_dec(tfm);
-	}
+
 	if (cipher_cfg & MOD_AES) {
 		switch (key_len) {
-			case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
-			case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
-			case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
-			default:
-				*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-				return -EINVAL;
+		case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
+		case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
+		case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
+		default:
+			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			return -EINVAL;
 		}
 		cipher_cfg |= keylen_cfg;
 	} else if (cipher_cfg & MOD_3DES) {
 		const u32 *K = (const u32 *)key;
 		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
-		{
+			     !((K[2] ^ K[4]) | (K[3] ^ K[5])))) {
 			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
 			return -EINVAL;
 		}
 	} else {
 		u32 tmp[DES_EXPKEY_WORDS];
-		if (des_ekey(tmp, key) == 0) {
+		if (des_ekey(tmp, key) == 0)
 			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		}
 	}
 	/* write cfg word to cryptinfo */
-	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
+	*(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
 	cinfo += sizeof(cipher_cfg);
 
 	/* write cipher key to cryptinfo */
 	memcpy(cinfo, key, key_len);
 	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
 	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
-		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
+		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
 		key_len = DES3_EDE_KEY_SIZE;
 	}
 	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
 	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
-	if ((cipher_cfg & MOD_AES) && !encrypt) {
+	if ((cipher_cfg & MOD_AES) && !encrypt)
 		return gen_rev_aes_key(tfm);
-	}
 	return 0;
 }
 
@@ -795,7 +774,7 @@
 		struct buffer_desc *buf, gfp_t flags,
 		enum dma_data_direction dir)
 {
-	for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
 		unsigned len = min(nbytes, sg->length);
 		struct buffer_desc *next_buf;
 		u32 next_buf_phys;
@@ -805,8 +784,8 @@
 		ptr = page_address(sg_page(sg)) + sg->offset;
 		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
 		if (!next_buf) {
-			buf = NULL;
-			break;
+			buf->next = NULL;
+			return NULL;
 		}
 		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
 		buf->next = next_buf;
@@ -846,11 +825,10 @@
 		goto out;
 
 	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
-		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY)
 			ret = -EINVAL;
-		} else {
+		else
 			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
-		}
 	}
 out:
 	if (!atomic_dec_and_test(&ctx->configuring))
@@ -922,9 +900,8 @@
 		src_direction = DMA_TO_DEVICE;
 		req_ctx->dst = dst_hook.next;
 		crypt->dst_buf = dst_hook.phys_next;
-	} else {
+	} else
 		req_ctx->dst = NULL;
-	}
 	req_ctx->src = NULL;
 	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
 				flags, src_direction))
@@ -940,9 +917,8 @@
 free_buf_src:
 	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dest:
-	if (req->src != req->dst) {
+	if (req->src != req->dst)
 		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-	}
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
@@ -966,7 +942,7 @@
 	int ret;
 
 	/* set up counter block */
-        memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
 	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
 
 	/* initialize counter portion of counter block */
@@ -1023,7 +999,7 @@
 	} else {
 		dir = &ctx->decrypt;
 		/* req->cryptlen includes the authsize when decrypting */
-		cryptlen = req->cryptlen -authsize;
+		cryptlen = req->cryptlen - authsize;
 		eff_cryptlen -= authsize;
 	}
 	crypt = get_crypt_desc();
@@ -1043,9 +1019,8 @@
 	BUG_ON(ivsize && !req->iv);
 	memcpy(crypt->iv, req->iv, ivsize);
 
-	if (req->src != req->dst) {
-		BUG(); /* -ENOTSUP because of my laziness */
-	}
+	if (req->src != req->dst)
+		BUG(); /* -ENOTSUP because of my laziness */
 
 	/* ASSOC data */
 	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
@@ -1068,32 +1043,28 @@
 				&crypt->icv_rev_aes);
 		if (unlikely(!req_ctx->hmac_virt))
 			goto free_chain;
-		if (!encrypt) {
+		if (!encrypt)
 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
 				req->src, cryptlen, authsize, 0);
-		}
 		req_ctx->encrypt = encrypt;
-	} else {
+	} else
 		req_ctx->hmac_virt = NULL;
-	}
 	/* Crypt */
 	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
 			DMA_BIDIRECTIONAL);
 	if (!buf)
 		goto free_hmac_virt;
-	if (!req_ctx->hmac_virt) {
+	if (!req_ctx->hmac_virt)
 		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
-	}
 
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
 free_hmac_virt:
-	if (req_ctx->hmac_virt) {
+	if (req_ctx->hmac_virt)
 		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
 				crypt->icv_rev_aes);
-	}
 free_chain:
 	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 out:
@@ -1135,9 +1106,8 @@
 		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
 			ret = -EINVAL;
 			goto out;
-		} else {
+		} else
 			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
-		}
 	}
 out:
 	if (!atomic_dec_and_test(&ctx->configuring))
@@ -1149,7 +1119,7 @@
 {
 	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
 
-	if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
+	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
 		return -EINVAL;
 	return aead_setup(tfm, authsize);
 }
@@ -1223,7 +1193,7 @@
 	seq = cpu_to_be64(req->seq);
 	memcpy(req->giv + ivsize - len, &seq, len);
 	return aead_perform(&req->areq, 1, req->areq.assoclen,
-			req->areq.cryptlen +ivsize, req->giv);
+			req->areq.cryptlen + ivsize, req->giv);
 }
 
 static struct ixp_alg ixp4xx_algos[] = {
@@ -1420,7 +1390,7 @@
 static int __init ixp_module_init(void)
 {
 	int num = ARRAY_SIZE(ixp4xx_algos);
-	int i,err ;
+	int i, err;
 
 	if (platform_device_register(&pseudo_dev))
 		return -ENODEV;
@@ -1433,18 +1403,14 @@
 		platform_device_unregister(&pseudo_dev);
 		return err;
 	}
-	for (i=0; i< num; i++) {
+	for (i = 0; i < num; i++) {
 		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
 
 		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
-			"%s"IXP_POSTFIX, cra->cra_name) >=
-			CRYPTO_MAX_ALG_NAME)
-		{
+			"%s"IXP_POSTFIX, cra->cra_name) >= CRYPTO_MAX_ALG_NAME)
 			continue;
-		}
-		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
+		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
 			continue;
-		}
 		if (!ixp4xx_algos[i].hash) {
 			/* block ciphers */
 			cra->cra_type = &crypto_ablkcipher_type;
@@ -1488,7 +1454,7 @@
 	int num = ARRAY_SIZE(ixp4xx_algos);
 	int i;
 
-	for (i=0; i< num; i++) {
+	for (i = 0; i < num; i++) {
 		if (ixp4xx_algos[i].registered)
 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
 	}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 93359fa..e064ee9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -124,6 +124,129 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called eql.  If unsure, say N.
 
+config IMQ
+	tristate "IMQ (intermediate queueing device) support"
+	depends on NETDEVICES && NETFILTER
+	---help---
+	  The IMQ device(s) is used as placeholder for QoS queueing
+	  disciplines. Every packet entering/leaving the IP stack can be
+	  directed through the IMQ device where it's enqueued/dequeued to the
+	  attached qdisc. This allows you to treat network devices as classes
+	  and distribute bandwidth among them. Iptables is used to specify
+	  through which IMQ device, if any, packets travel.
+
+	  More information at: http://www.linuximq.net/
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called imq.  If unsure, say N.
+
+choice
+	prompt "IMQ behavior (PRE/POSTROUTING)"
+	depends on IMQ
+	default IMQ_BEHAVIOR_AB
+	help
+
+		This setting defines how IMQ behaves with respect to its
+		hooking in PREROUTING and POSTROUTING.
+
+		IMQ can work in any of the following ways:
+
+		    PREROUTING   |      POSTROUTING
+		-----------------|-------------------
+		#1  After NAT    |      After NAT
+		#2  After NAT    |      Before NAT
+		#3  Before NAT   |      After NAT
+		#4  Before NAT   |      Before NAT
+
+		The default behavior is to hook before NAT on PREROUTING
+		and after NAT on POSTROUTING (#3).
+
+		This setting is especially useful when trying to use IMQ
+		to shape NATed clients.
+
+		More information can be found at: www.linuximq.net
+
+		If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_AA
+	bool "IMQ AA"
+	help
+		This setting defines how IMQ behaves with respect to its
+		hooking in PREROUTING and POSTROUTING.
+
+		Choosing this option will make IMQ hook like this:
+
+		PREROUTING:   After NAT
+		POSTROUTING:  After NAT
+
+		More information can be found at: www.linuximq.net
+
+		If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_AB
+	bool "IMQ AB"
+	help
+		This setting defines how IMQ behaves with respect to its
+		hooking in PREROUTING and POSTROUTING.
+
+		Choosing this option will make IMQ hook like this:
+
+		PREROUTING:   After NAT
+		POSTROUTING:  Before NAT
+
+		More information can be found at: www.linuximq.net
+
+		If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_BA
+	bool "IMQ BA"
+	help
+		This setting defines how IMQ behaves with respect to its
+		hooking in PREROUTING and POSTROUTING.
+
+		Choosing this option will make IMQ hook like this:
+
+		PREROUTING:   Before NAT
+		POSTROUTING:  After NAT
+
+		More information can be found at: www.linuximq.net
+
+		If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_BB
+	bool "IMQ BB"
+	help
+		This setting defines how IMQ behaves with respect to its
+		hooking in PREROUTING and POSTROUTING.
+
+		Choosing this option will make IMQ hook like this:
+
+		PREROUTING:   Before NAT
+		POSTROUTING:  Before NAT
+
+		More information can be found at: www.linuximq.net
+
+		If not sure leave the default settings alone.
+
+endchoice
+
+config IMQ_NUM_DEVS
+
+	int "Number of IMQ devices"
+	range 2 16
+	depends on IMQ
+	default "16"
+	help
+
+		This setting defines how many IMQ devices will be
+		created.
+
+		The default value is 16.
+
+		More information can be found at: www.linuximq.net
+
+		If not sure leave the default settings alone.
+
 config TUN
 	tristate "Universal TUN/TAP device driver support"
 	select CRC32
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d5ce011..267682e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -176,6 +176,7 @@
 obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
 
 obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IMQ) += imq.o
 obj-$(CONFIG_IFB) += ifb.o
 obj-$(CONFIG_MACVLAN) += macvlan.o
 obj-$(CONFIG_MACVTAP) += macvtap.o
diff --git a/drivers/net/imq.c b/drivers/net/imq.c
new file mode 100644
index 0000000..7f8737a
--- /dev/null
+++ b/drivers/net/imq.c
@@ -0,0 +1,777 @@
+/*
+ *             Pseudo-driver for the intermediate queue device.
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * Authors:    Patrick McHardy, <kaber@trash.net>
+ *
+ *            The first version was written by Martin Devera, <devik@cdi.cz>
+ *
+ * Credits:    Jan Rafaj <imq2t@cedric.vabo.cz>
+ *              - Update patch to 2.4.21
+ *             Sebastian Strollo <sstrollo@nortelnetworks.com>
+ *              - Fix "Dead-loop on netdevice imq"-issue
+ *             Marcel Sebek <sebek64@post.cz>
+ *              - Update to 2.6.2-rc1
+ *
+ *	       After some time of inactivity there is a group taking care
+ *	       of IMQ again: http://www.linuximq.net
+ *
+ *
+ *	       2004/06/30 - New version of IMQ patch to kernels <=2.6.7
+ *             including the following changes:
+ *
+ *	       - Correction of ipv6 support "+"s issue (Hasso Tepper)
+ *	       - Correction of imq_init_devs() issue that resulted in
+ *	       kernel OOPS unloading IMQ as module (Norbert Buchmuller)
+ *	       - Addition of functionality to choose number of IMQ devices
+ *	       during kernel config (Andre Correa)
+ *	       - Addition of functionality to choose how IMQ hooks on
+ *	       PRE and POSTROUTING (after or before NAT) (Andre Correa)
+ *	       - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
+ *
+ *
+ *             2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
+ *             released with almost no problems. 2.6.14-x was released
+ *             with some important changes: nfcache was removed; After
+ *             some weeks of trouble we figured out that some IMQ fields
+ *             in skb were missing in skbuff.c - skb_clone and copy_skb_header.
+ *             These functions are correctly patched by this new patch version.
+ *
+ *             Thanks for all who helped to figure out all the problems with
+ *             2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
+ *             Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
+ *             I didn't forget anybody). I apologize again for my lack of time.
+ *
+ *
+ *             2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
+ *             of qdisc_restart() and moved qdisc_run() to tasklet to avoid
+ *             recursive locking. New initialization routines to fix 'rmmod' not
+ *             working anymore. Used code from ifb.c. (Jussi Kivilinna)
+ *
+ *             2008/08/06 - 2.6.26 - (JK)
+ *              - Replaced tasklet with 'netif_schedule()'.
+ *              - Cleaned up and added comments for imq_nf_queue().
+ *
+ *             2009/04/12
+ *              - Add skb_save_cb/skb_restore_cb helper functions to back up
+ *                the control buffer. This is needed because qdisc-layer on kernels
+ *                2.6.27 and newer overwrite control buffer. (Jussi Kivilinna)
+ *              - Add better locking for IMQ device. Hopefully this will solve
+ *                SMP issues. (Jussi Kivilinna)
+ *              - Port to 2.6.27
+ *              - Port to 2.6.28
+ *              - Port to 2.6.29 + fix rmmod not working
+ *
+ *             2009/04/20 - (Jussi Kivilinna)
+ *              - Use netdevice feature flags to avoid extra packet handling
+ *                by core networking layer and possibly increase performance.
+ *
+ *             2009/09/26 - (Jussi Kivilinna)
+ *              - Add imq_nf_reinject_lockless to fix deadlock with
+ *                imq_nf_queue/imq_nf_reinject.
+ *
+ *             2009/12/08 - (Jussi Kivilinna)
+ *              - Port to 2.6.32
+ *              - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
+ *              - Also add better error checking for skb->nf_queue_entry usage
+ *
+ *             2010/02/25 - (Jussi Kivilinna)
+ *              - Port to 2.6.33
+ *
+ *             2010/08/15 - (Jussi Kivilinna)
+ *              - Port to 2.6.35
+ *              - Simplify hook registration by using nf_register_hooks.
+ *              - nf_reinject doesn't need spinlock around it, therefore remove
+ *                imq_nf_reinject function. Other nf_reinject users protect
+ *                their own data with spinlock. With IMQ however all data that
+ *                is needed is stored per skbuff, so no locking is needed.
+ *              - Changed IMQ to use 'separate' NF_IMQ_QUEUE instead of
+ *                NF_QUEUE, this allows working coexistence of IMQ and other
+ *                NF_QUEUE users.
+ *              - Make IMQ multi-queue. Number of IMQ device queues can be
+ *                increased with 'numqueues' module parameters. Default number
+ *                of queues is 1, in other words by default IMQ works as
+ *                single-queue device. Multi-queue selection is based on
+ *                IFB multi-queue patch by Changli Gao <xiaosuo@gmail.com>.
+ *
+ *             2011/03/18 - (Jussi Kivilinna)
+ *              - Port to 2.6.38
+ *
+ *	       Also, many thanks to pablo Sebastian Greco for making the initial
+ *	       patch and to those who helped the testing.
+ *
+ *             More info at: http://www.linuximq.net/ (Andre Correa)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	#include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+#include <net/netfilter/nf_queue.h>
+#include <net/sock.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
+
+static nf_hookfn imq_nf_hook;
+
+static struct nf_hook_ops imq_ops[] = {
+	{
+	/* imq_ingress_ipv4 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+		.priority	= NF_IP_PRI_MANGLE + 1,
+#else
+		.priority	= NF_IP_PRI_NAT_DST + 1,
+#endif
+	},
+	{
+	/* imq_egress_ipv4 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+		.priority	= NF_IP_PRI_LAST,
+#else
+		.priority	= NF_IP_PRI_NAT_SRC - 1,
+#endif
+	},
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	{
+	/* imq_ingress_ipv6 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum	= NF_INET_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+		.priority	= NF_IP6_PRI_MANGLE + 1,
+#else
+		.priority	= NF_IP6_PRI_NAT_DST + 1,
+#endif
+	},
+	{
+	/* imq_egress_ipv6 */
+		.hook		= imq_nf_hook,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET6,
+		.hooknum	= NF_INET_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+		.priority	= NF_IP6_PRI_LAST,
+#else
+		.priority	= NF_IP6_PRI_NAT_SRC - 1,
+#endif
+	},
+#endif
+};
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+static int numdevs = CONFIG_IMQ_NUM_DEVS;
+#else
+static int numdevs = IMQ_MAX_DEVS;
+#endif
+
+#define IMQ_MAX_QUEUES 32
+static int numqueues = 1;
+
+/*static DEFINE_SPINLOCK(imq_nf_queue_lock);*/
+
+static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
+
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+	return &dev->stats;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
+	struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+	skb->nf_queue_entry = NULL;
+
+	if (entry) {
+		nf_queue_entry_release_refs(entry);
+		kfree(entry);
+	}
+
+	skb_restore_cb(skb); /* kfree backup */
+}
+
+static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+	skb->nf_queue_entry = NULL;
+	dev->trans_start = jiffies;
+
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+
+	if (entry == NULL) {
+		/* We don't know what is going on here.. packet is queued for
+		 * imq device, but (probably) not by us.
+		 *
+		 * If this packet was not sent here by imq_nf_queue(), then
+		 * skb_save_cb() was not used and skb_free() should not show:
+		 *   WARNING: IMQ: kfree_skb: skb->cb_next:..
+		 * and/or
+		 *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
+		 *
+		 * However if this message is shown, then IMQ is somehow broken
+		 * and you should report this to linuximq.net.
+		 */
+
+		/* imq_dev_xmit is black hole that eats all packets, report that
+		 * we eat this packet happily and increase dropped counters.
+		 */
+
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+
+		return NETDEV_TX_OK;
+	}
+
+	skb_restore_cb(skb); /* restore skb->cb */
+
+	skb->imq_flags = 0;
+	skb->destructor = NULL;
+
+	nf_reinject(entry, NF_ACCEPT);
+
+	return NETDEV_TX_OK;
+}
+
+static u32 imq_hashrnd;
+
+static inline __be16 pppoe_proto(const struct sk_buff *skb)
+{
+	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+			sizeof(struct pppoe_hdr)));
+}
+
+static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
+{
+	unsigned int pull_len;
+	u16 protocol = skb->protocol;
+	u32 addr1, addr2;
+	u32 hash, ihl = 0;
+	union {
+		u16 in16[2];
+		u32 in32;
+	} ports;
+	u8 ip_proto;
+
+	pull_len = 0;
+
+recheck:
+	switch (protocol) {
+	case htons(ETH_P_8021Q): {
+		if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
+			goto other;
+
+		pull_len += VLAN_HLEN;
+		skb->network_header += VLAN_HLEN;
+
+		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+		goto recheck;
+	}
+
+	case htons(ETH_P_PPP_SES): {
+		if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
+			goto other;
+
+		pull_len += PPPOE_SES_HLEN;
+		skb->network_header += PPPOE_SES_HLEN;
+
+		protocol = pppoe_proto(skb);
+		goto recheck;
+	}
+
+	case htons(ETH_P_IP): {
+		const struct iphdr *iph = ip_hdr(skb);
+
+		if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
+			goto other;
+
+		addr1 = iph->daddr;
+		addr2 = iph->saddr;
+
+		ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
+				 iph->protocol : 0;
+		ihl = ip_hdrlen(skb);
+
+		break;
+	}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6): {
+		const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+		if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
+			goto other;
+
+		addr1 = iph->daddr.s6_addr32[3];
+		addr2 = iph->saddr.s6_addr32[3];
+		ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto);
+		if (unlikely(ihl < 0))
+			goto other;
+
+		break;
+	}
+#endif
+	default:
+other:
+		if (pull_len != 0) {
+			skb_push(skb, pull_len);
+			skb->network_header -= pull_len;
+		}
+
+		return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
+	}
+
+	if (addr1 > addr2)
+		swap(addr1, addr2);
+
+	switch (ip_proto) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+	case IPPROTO_DCCP:
+	case IPPROTO_ESP:
+	case IPPROTO_AH:
+	case IPPROTO_SCTP:
+	case IPPROTO_UDPLITE: {
+		if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
+			if (ports.in16[0] > ports.in16[1])
+				swap(ports.in16[0], ports.in16[1]);
+			break;
+		}
+		/* fall-through */
+	}
+	default:
+		ports.in32 = 0;
+		break;
+	}
+
+	if (pull_len != 0) {
+		skb_push(skb, pull_len);
+		skb->network_header -= pull_len;
+	}
+
+	hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
+
+	return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
+}
+
+static inline bool sk_tx_queue_recorded(struct sock *sk)
+{
+	return (sk_tx_queue_get(sk) >= 0);
+}
+
+static struct netdev_queue *imq_select_queue(struct net_device *dev,
+						struct sk_buff *skb)
+{
+	u16 queue_index = 0;
+	u32 hash;
+
+	if (likely(dev->real_num_tx_queues == 1))
+		goto out;
+
+	/* IMQ can be receiving ingress or egress packets. */
+
+	/* Check first for if rx_queue is set */
+	if (skb_rx_queue_recorded(skb)) {
+		queue_index = skb_get_rx_queue(skb);
+		goto out;
+	}
+
+	/* Check if socket has tx_queue set */
+	if (sk_tx_queue_recorded(skb->sk)) {
+		queue_index = sk_tx_queue_get(skb->sk);
+		goto out;
+	}
+
+	/* Try use socket hash */
+	if (skb->sk && skb->sk->sk_hash) {
+		hash = skb->sk->sk_hash;
+		queue_index =
+			(u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
+		goto out;
+	}
+
+	/* Generate hash from packet data */
+	queue_index = imq_hash(dev, skb);
+
+out:
+	if (unlikely(queue_index >= dev->real_num_tx_queues))
+		queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
+
+	return netdev_get_tx_queue(dev, queue_index);
+}
+
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
+{
+	struct net_device *dev;
+	struct sk_buff *skb_orig, *skb, *skb_shared;
+	struct Qdisc *q;
+	struct netdev_queue *txq;
+	spinlock_t *root_lock;
+	int users, index;
+	int retval = -EINVAL;
+
+	index = entry->skb->imq_flags & IMQ_F_IFMASK;
+	if (unlikely(index > numdevs - 1)) {
+		if (net_ratelimit())
+			printk(KERN_WARNING
+			       "IMQ: invalid device specified, highest is %u\n",
+			       numdevs - 1);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	/* check for imq device by index from cache */
+	dev = imq_devs_cache[index];
+	if (unlikely(!dev)) {
+		char buf[8];
+
+		/* get device by name and cache result */
+		snprintf(buf, sizeof(buf), "imq%d", index);
+		dev = dev_get_by_name(&init_net, buf);
+		if (unlikely(!dev)) {
+			/* not found ?!*/
+			BUG();
+			retval = -ENODEV;
+			goto out;
+		}
+
+		imq_devs_cache[index] = dev;
+		dev_put(dev);
+	}
+
+	if (unlikely(!(dev->flags & IFF_UP))) {
+		entry->skb->imq_flags = 0;
+		nf_reinject(entry, NF_ACCEPT);
+		retval = 0;
+		goto out;
+	}
+	dev->last_rx = jiffies;
+
+	skb = entry->skb;
+	skb_orig = NULL;
+
+	/* skb has owner? => make clone */
+	if (unlikely(skb->destructor)) {
+		skb_orig = skb;
+		skb = skb_clone(skb, GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			retval = -ENOMEM;
+			goto out;
+		}
+		entry->skb = skb;
+	}
+
+	skb->nf_queue_entry = entry;
+
+	dev->stats.rx_bytes += skb->len;
+	dev->stats.rx_packets++;
+
+	/* Disables softirqs for lock below */
+	rcu_read_lock_bh();
+
+	/* Multi-queue selection */
+	txq = imq_select_queue(dev, skb);
+
+	q = rcu_dereference(txq->qdisc);
+	if (unlikely(!q->enqueue))
+		goto packet_not_eaten_by_imq_dev;
+
+	root_lock = qdisc_lock(q);
+	spin_lock(root_lock);
+
+	users = atomic_read(&skb->users);
+
+	skb_shared = skb_get(skb); /* increase reference count by one */
+	skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will
+					overwrite it */
+	qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
+
+	if (likely(atomic_read(&skb_shared->users) == users + 1)) {
+		kfree_skb(skb_shared); /* decrease reference count by one */
+
+		skb->destructor = &imq_skb_destructor;
+
+		/* cloned? */
+		if (unlikely(skb_orig))
+			kfree_skb(skb_orig); /* free original */
+
+		spin_unlock(root_lock);
+		rcu_read_unlock_bh();
+
+		/* schedule qdisc dequeue */
+		__netif_schedule(q);
+
+		retval = 0;
+		goto out;
+	} else {
+		skb_restore_cb(skb_shared); /* restore skb->cb */
+		skb->nf_queue_entry = NULL;
+		/* The qdisc dropped the packet and decreased the skb's
+		 * reference count, so we must not try to free it again as
+		 * that would actually destroy the skb. */
+		spin_unlock(root_lock);
+		goto packet_not_eaten_by_imq_dev;
+	}
+
+packet_not_eaten_by_imq_dev:
+	rcu_read_unlock_bh();
+
+	/* cloned? restore original */
+	if (unlikely(skb_orig)) {
+		kfree_skb(skb);
+		entry->skb = skb_orig;
+	}
+	retval = -1;
+out:
+	return retval;
+}
+
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
+				const struct net_device *indev,
+				const struct net_device *outdev,
+				int (*okfn)(struct sk_buff *))
+{
+	return (pskb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
+}
+
+static int imq_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int imq_open(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+static const struct net_device_ops imq_netdev_ops = {
+	.ndo_open		= imq_open,
+	.ndo_stop		= imq_close,
+	.ndo_start_xmit		= imq_dev_xmit,
+	.ndo_get_stats		= imq_get_stats,
+};
+
+static void imq_setup(struct net_device *dev)
+{
+	dev->netdev_ops		= &imq_netdev_ops;
+	dev->type               = ARPHRD_VOID;
+	dev->mtu                = 16000;
+	dev->tx_queue_len       = 11000;
+	dev->flags              = IFF_NOARP;
+	dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST |
+				  NETIF_F_GSO | NETIF_F_HW_CSUM |
+				  NETIF_F_HIGHDMA;
+	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
+}
+
+static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	int ret = 0;
+
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+			ret = -EINVAL;
+			goto end;
+		}
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+			ret = -EADDRNOTAVAIL;
+			goto end;
+		}
+	}
+	return 0;
+end:
+	printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
+	return ret;
+}
+
+static struct rtnl_link_ops imq_link_ops __read_mostly = {
+	.kind		= "imq",
+	.priv_size	= 0,
+	.setup		= imq_setup,
+	.validate	= imq_validate,
+};
+
+static const struct nf_queue_handler imq_nfqh = {
+	.name  = "imq",
+	.outfn = imq_nf_queue,
+};
+
+static int __init imq_init_hooks(void)
+{
+	int ret;
+
+	nf_register_queue_imq_handler(&imq_nfqh);
+
+	ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
+	if (ret < 0)
+		nf_unregister_queue_imq_handler();
+
+	return ret;
+}
+
+static int __init imq_init_one(int index)
+{
+	struct net_device *dev;
+	int ret;
+
+	dev = alloc_netdev_mq(0, "imq%d", imq_setup, numqueues);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = dev_alloc_name(dev, dev->name);
+	if (ret < 0)
+		goto fail;
+
+	dev->rtnl_link_ops = &imq_link_ops;
+	ret = register_netdevice(dev);
+	if (ret < 0)
+		goto fail;
+
+	return 0;
+fail:
+	free_netdev(dev);
+	return ret;
+}
+
+static int __init imq_init_devs(void)
+{
+	int err, i;
+
+	if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
+		printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
+		       IMQ_MAX_DEVS);
+		return -EINVAL;
+	}
+
+	if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
+		printk(KERN_ERR "IMQ: numqueues has to be betweed 1 and %u\n",
+		       IMQ_MAX_QUEUES);
+		return -EINVAL;
+	}
+
+	get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
+
+	rtnl_lock();
+	err = __rtnl_link_register(&imq_link_ops);
+
+	for (i = 0; i < numdevs && !err; i++)
+		err = imq_init_one(i);
+
+	if (err) {
+		__rtnl_link_unregister(&imq_link_ops);
+		memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+	}
+	rtnl_unlock();
+
+	return err;
+}
+
+static int __init imq_init_module(void)
+{
+	int err;
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+	BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
+	BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
+	BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
+#endif
+
+	err = imq_init_devs();
+	if (err) {
+		printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
+		return err;
+	}
+
+	err = imq_init_hooks();
+	if (err) {
+		printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
+		rtnl_link_unregister(&imq_link_ops);
+		memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+		return err;
+	}
+
+	printk(KERN_INFO "IMQ driver loaded successfully. "
+		"(numdevs = %d, numqueues = %d)\n", numdevs, numqueues);
+
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+	printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
+#else
+	printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
+#endif
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+	printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
+#else
+	printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
+#endif
+
+	return 0;
+}
+
+static void __exit imq_unhook(void)
+{
+	nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
+	nf_unregister_queue_imq_handler();
+}
+
+static void __exit imq_cleanup_devs(void)
+{
+	rtnl_link_unregister(&imq_link_ops);
+	memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
+}
+
+static void __exit imq_exit_module(void)
+{
+	imq_unhook();
+	imq_cleanup_devs();
+	printk(KERN_INFO "IMQ driver unloaded successfully.\n");
+}
+
+module_init(imq_init_module);
+module_exit(imq_exit_module);
+
+module_param(numdevs, int, 0);
+module_param(numqueues, int, 0);
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
+			"be created)");
+MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
+			"http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("imq");
+
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index f1e1643..8accd0d 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1,7 +1,7 @@
 /*
  * Intel IXP4xx HSS (synchronous serial port) driver for Linux
  *
- * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
+ * Copyright (C) 2007-2010 Krzysztof Hałasa <khc@pm.waw.pl>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -16,8 +16,11 @@
 #include <linux/hdlc.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <mach/npe.h>
 #include <mach/qmgr.h>
@@ -27,10 +30,12 @@
 #define DEBUG_TX		0
 #define DEBUG_PKT_BYTES		0
 #define DEBUG_CLOSE		0
+#define DEBUG_FRAMER		0
 
 #define DRV_NAME		"ixp4xx_hss"
 
 #define PKT_EXTRA_FLAGS		0 /* orig 1 */
+#define TX_FRAME_SYNC_OFFSET	0 /* channelized */
 #define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
 #define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */
 
@@ -41,11 +46,21 @@
 #define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
 #define MAX_CLOSE_WAIT		1000 /* microseconds */
 #define HSS_COUNT		2
-#define FRAME_SIZE		256 /* doesn't matter at this point */
-#define FRAME_OFFSET		0
-#define MAX_CHANNELS		(FRAME_SIZE / 8)
+#define MIN_FRAME_SIZE		16   /* bits */
+#define MAX_FRAME_SIZE		257  /* 256 bits + framing bit */
+#define MAX_CHANNELS		(MAX_FRAME_SIZE / 8)
+#define MAX_CHAN_DEVICES	32
+#define CHANNEL_HDLC		0xFE
+#define CHANNEL_UNUSED		0xFF
 
 #define NAPI_WEIGHT		16
+#define CHAN_RX_TRIGGER		16 /* 8 RX frames = 1 ms @ E1 */
+#define CHAN_RX_FRAMES		64
+#define G704_FRAME_SIZE		256 /* E1 only */
+#define CHAN_TX_LIST_FRAMES	16 /* bytes/channel per list, 16 - 48 */
+#define CHAN_TX_LISTS		8
+#define CHAN_TX_FRAMES		(CHAN_TX_LIST_FRAMES * CHAN_TX_LISTS)
+#define CHAN_QUEUE_LEN		16 /* minimum possible */
 
 /* Queue IDs */
 #define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
@@ -218,6 +233,23 @@
 /* triggers the NPE to return an HssErrorReadResponse message */
 #define PORT_ERROR_READ			0x42
 
+/* reset NPE internal status and enable the HssChannelized operation */
+#define CHAN_FLOW_ENABLE		0x43
+#define CHAN_FLOW_DISABLE		0x44
+#define CHAN_IDLE_PATTERN_WRITE		0x45
+#define CHAN_NUM_CHANS_WRITE		0x46
+#define CHAN_RX_BUF_ADDR_WRITE		0x47
+#define CHAN_RX_BUF_CFG_WRITE		0x48
+#define CHAN_TX_BLK_CFG_WRITE		0x49
+#define CHAN_TX_BUF_ADDR_WRITE		0x4A
+#define CHAN_TX_BUF_SIZE_WRITE		0x4B
+#define CHAN_TSLOTSWITCH_ENABLE		0x4C
+#define CHAN_TSLOTSWITCH_DISABLE	0x4D
+
+/* downloads the gainWord value for a timeslot switching channel associated
+   with bypassNum */
+#define CHAN_TSLOTSWITCH_GCT_DOWNLOAD	0x4E
+
 /* triggers the NPE to reset internal status and enable the HssPacketized
    operation for the flow specified by pPipe */
 #define PKT_PIPE_FLOW_ENABLE		0x50
@@ -240,6 +272,9 @@
 #define ERR_DISCONNECTING	7 /* disconnect is in progress */
 
 
+enum mode {MODE_HDLC = 0, MODE_RAW, MODE_G704};
+enum error_bit {TX_ERROR_BIT = 0, RX_ERROR_BIT = 1};
+
 #ifdef __ARMEB__
 typedef struct sk_buff buffer_t;
 #define free_buffer dev_kfree_skb
@@ -250,6 +285,17 @@
 #define free_buffer_irq kfree
 #endif
 
+struct chan_device {
+	struct cdev cdev;
+	struct device *dev;
+	struct port *port;
+	unsigned int open_count, excl_open;
+	unsigned int tx_first, tx_count, rx_first, rx_count; /* bytes */
+	unsigned long errors_bitmap;
+	u8 id, chan_count;
+	u8 log_channels[MAX_CHANNELS];
+};
+
 struct port {
 	struct device *dev;
 	struct npe *npe;
@@ -260,10 +306,34 @@
 	struct desc *desc_tab;	/* coherent */
 	u32 desc_tab_phys;
 	unsigned int id;
-	unsigned int clock_type, clock_rate, loopback;
-	unsigned int initialized, carrier;
+	atomic_t chan_tx_irq_number, chan_rx_irq_number;
+	wait_queue_head_t chan_tx_waitq, chan_rx_waitq;
 	u8 hdlc_cfg;
+
+	unsigned int initialized; /* protected by firmware_mutex */
+
+	/* the following fields must be protected by rtnl */
+	enum mode mode;			/* RW: set_mode() */
+	unsigned int port_open_count;	/* RW: hss_port_open() and hss_port_close() */
+	unsigned int chan_open_count;	/* hss_chan_open() and hss_chan_close() */
+	unsigned int hdlc_open;		/* RW: hss_hdlc_open() and hss_hdlc_close() */
+	unsigned int clock_rate;	/* RW: hss_hdlc_ioctl()  */
+
+	/* the following fields must be protected by rtnl or npe_lock (read) and both (write) */
+	unsigned int clock_type, frame_size, loopback;
 	u32 clock_reg;
+
+	/* the following fields must be protected by npe_lock */
+	unsigned int aligned, carrier, frame_sync_offset, sync_counter;
+
+	struct chan_device *chan_devices[MAX_CHAN_DEVICES];
+	u8 *chan_buf;
+	u32 chan_tx_buf_phys, chan_rx_buf_phys;
+	unsigned int chan_started, chan_last_rx, chan_last_tx;
+
+	/* assigned channels, may be invalid with given frame length or mode */
+	u8 channels[MAX_CHANNELS];
+	int msg_count;
 };
 
 /* NPE message structure */
@@ -316,20 +386,38 @@
 				 ((n) + RX_DESCS) * sizeof(struct desc))
 #define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
 
+#define chan_tx_buf_len(port)	(port->frame_size / 8 * CHAN_TX_FRAMES)
+#define chan_tx_lists_len(port)	(port->frame_size / 8 * CHAN_TX_LISTS * \
+				 sizeof(u32))
+#define chan_rx_buf_len(port)	(port->frame_size / 8 * CHAN_RX_FRAMES)
+
+#define chan_tx_buf(port)	((port)->chan_buf)
+#define chan_tx_lists(port)	(chan_tx_buf(port) + chan_tx_buf_len(port))
+#define chan_rx_buf(port)	(chan_tx_lists(port) + chan_tx_lists_len(port))
+
+#define chan_tx_lists_phys(port) ((port)->chan_tx_buf_phys +	\
+				  chan_tx_buf_len(port))
+
+static int hss_chan_open(struct port *port);
+void hss_chan_close(struct port *port);
+
 /*****************************************************************************
  * global variables
  ****************************************************************************/
 
-static int ports_open;
+static struct class *hss_class;
+static int chan_major;
+static unsigned int dma_pool_use_count; /* protected by rtnl */
 static struct dma_pool *dma_pool;
 static spinlock_t npe_lock;
+static DEFINE_MUTEX(firmware_mutex);
 
 static const struct {
-	int tx, txdone, rx, rxfree;
+	int tx, txdone, rx, rxfree, chan;
 }queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
-		  HSS0_PKT_RXFREE0_QUEUE},
+		  HSS0_PKT_RXFREE0_QUEUE, HSS0_CHL_RXTRIG_QUEUE},
 		 {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
-		  HSS1_PKT_RXFREE0_QUEUE},
+		  HSS1_PKT_RXFREE0_QUEUE, HSS1_CHL_RXTRIG_QUEUE},
 };
 
 /*****************************************************************************
@@ -341,6 +429,11 @@
 	return dev_to_hdlc(dev)->priv;
 }
 
+static inline struct chan_device* inode_to_chan_dev(struct inode *inode)
+{
+	return container_of(inode->i_cdev, struct chan_device, cdev);
+}
+
 #ifndef __ARMEB__
 static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
 {
@@ -350,6 +443,97 @@
 }
 #endif
 
+static int get_number(const char **buf, size_t *len, unsigned int *ptr,
+		      unsigned int min, unsigned int max)
+{
+	char *endp;
+	unsigned long val = simple_strtoul(*buf, &endp, 10);
+
+	if (endp == *buf || endp - *buf > *len || val < min || val > max)
+		return -EINVAL;
+	*len -= endp - *buf;
+	*buf = endp;
+	*ptr = val;
+	return 0;
+}
+
+static int parse_channels(const char **buf, size_t *len, u8 *channels)
+{
+	unsigned int ch, next = 0;
+
+	if (*len && (*buf)[*len - 1] == '\n')
+		(*len)--;
+
+	memset(channels, 0, MAX_CHANNELS);
+
+	if (!*len)
+		return 0;
+
+	/* Format: "A,B-C,...", strictly increasing (A < B < C) */
+	while (1) {
+		if (get_number(buf, len, &ch, next, MAX_CHANNELS - 1))
+			return -EINVAL;
+		channels[ch] = 1;
+		next = ch + 1;
+		if (!*len)
+			break;
+		if (**buf == ',') {
+			(*buf)++;
+			(*len)--;
+			continue;
+		}
+		if (**buf != '-')
+			return -EINVAL;
+		(*buf)++;
+		(*len)--;
+		if (get_number(buf, len, &ch, next, MAX_CHANNELS - 1))
+			return -EINVAL;
+		while (next <= ch)
+			channels[next++] = 1;
+		if (!*len)
+			break;
+		if (**buf != ',')
+			return -EINVAL;
+		(*buf)++;
+		(*len)--;
+	}
+	return 1;
+}
+
+static size_t print_channels(struct port *port, char *buf, u8 id)
+{
+	unsigned int ch, cnt = 0;
+	size_t len = 0;
+
+	for (ch = 0; ch < MAX_CHANNELS; ch++)
+		if (port->channels[ch] == id) {
+			if (cnt == 0) {
+				sprintf(buf + len, "%s%u", len ? "," : "", ch);
+				len += strlen(buf + len);
+			}
+			cnt++;
+		} else {
+			if (cnt > 1) {
+				sprintf(buf + len, "-%u", ch - 1);
+				len += strlen(buf + len);
+			}
+			cnt = 0;
+		}
+	if (cnt > 1) {
+		sprintf(buf + len, "-%u", ch - 1);
+		len += strlen(buf + len);
+	}
+
+	buf[len++] = '\n';
+	return len;
+}
+
+static inline unsigned int sub_offset(unsigned int a, unsigned int b,
+				      unsigned int modulo)
+{
+	return (modulo /* make sure the result >= 0 */ + a - b) % modulo;
+}
+
 /*****************************************************************************
  * HSS access
  ****************************************************************************/
@@ -365,18 +549,43 @@
 	}
 }
 
-static void hss_config_set_lut(struct port *port)
+static void hss_config_lut(struct port *port)
 {
 	struct msg msg;
-	int ch;
+	int chan_count = 0, log_chan = 0, i, ch;
+
+	for (i = 0; i < MAX_CHAN_DEVICES; i++)
+		if (port->chan_devices[i])
+			port->chan_devices[i]->chan_count = 0;
 
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = PORT_CONFIG_WRITE;
 	msg.hss_port = port->id;
 
 	for (ch = 0; ch < MAX_CHANNELS; ch++) {
+		struct chan_device *chdev = NULL;
+		unsigned int entry;
+
+		if (port->channels[ch] < MAX_CHAN_DEVICES /* assigned */)
+			chdev = port->chan_devices[port->channels[ch]];
+
+		if (port->mode == MODE_G704 && ch == 0)
+			entry = TDMMAP_VOICE64K; /* PCM-31 pattern */
+		else if (port->mode == MODE_HDLC ||
+			 port->channels[ch] == CHANNEL_HDLC)
+			entry = TDMMAP_HDLC;
+		else if (chdev && chdev->open_count) {
+			entry = TDMMAP_VOICE64K;
+			chdev->log_channels[chdev->chan_count++] = log_chan;
+		} else
+			entry = TDMMAP_UNASSIGNED;
+		if (entry == TDMMAP_VOICE64K) {
+			chan_count++;
+			log_chan++;
+		}
+
 		msg.data32 >>= 2;
-		msg.data32 |= TDMMAP_HDLC << 30;
+		msg.data32 |= entry << 30;
 
 		if (ch % 16 == 15) {
 			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
@@ -386,9 +595,39 @@
 			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
 		}
 	}
+
+	if (!chan_count)
+		return;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_NUM_CHANS_WRITE;
+	msg.hss_port = port->id;
+	msg.data8a = chan_count;
+	hss_npe_send(port, &msg, "CHAN_NUM_CHANS_WRITE");
+
+	dma_sync_single_for_cpu(port->dev, port->chan_tx_buf_phys,
+				chan_tx_buf_len(port) + chan_tx_lists_len(port),
+				DMA_TO_DEVICE);
+	/* don't leak data */
+	// FIXME memset(chan_tx_buf(port), 0, CHAN_TX_FRAMES * chan_count);
+	if (port->mode == MODE_G704) /* G.704 PCM-31 sync pattern */
+		for (i = 0; i < CHAN_TX_FRAMES; i += 4)
+			*(u32*)(chan_tx_buf(port) + i) = 0x9BDF9BDF;
+
+	for (i = 0; i < CHAN_TX_LISTS; i++) {
+		u32 phys = port->chan_tx_buf_phys + i * CHAN_TX_LIST_FRAMES;
+		u32 *list = ((u32 *)chan_tx_lists(port)) + i * chan_count;
+		for (ch = 0; ch < chan_count; ch++)
+			list[ch] = phys + ch * CHAN_TX_FRAMES;
+	}
+	dma_sync_single_for_device(port->dev, port->chan_tx_buf_phys,
+			chan_tx_buf_len(port) + chan_tx_lists_len(port),
+			DMA_TO_DEVICE);
 }
 
-static void hss_config(struct port *port)
+static u32 hss_get_status(struct port *port);
+
+static void hss_config_main(struct port *port)
 {
 	struct msg msg;
 
@@ -396,14 +635,23 @@
 	msg.cmd = PORT_CONFIG_WRITE;
 	msg.hss_port = port->id;
 	msg.index = HSS_CONFIG_TX_PCR;
-	msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
-		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
-	if (port->clock_type == CLOCK_INT)
+	msg.data32 = PCR_DCLK_EDGE_RISING | PCR_MSB_ENDIAN | PCR_TX_DATA_ENABLE;
+	if (port->mode == MODE_HDLC)
+		msg.data32 |= PCR_FRM_PULSE_DISABLED;
+	else
+		msg.data32 |= PCR_FRM_SYNC_OUTPUT_RISING;
+	if (port->frame_size % 8 == 0)
+		msg.data32 |= PCR_SOF_NO_FBIT;
+	if ((port->clock_type & CLOCK_TYPE_MASK) == CLOCK_INT)
 		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
+	if (port->clock_type & CLOCK_TX_INVERTED)
+		msg.data32 ^= PCR_DCLK_EDGE_RISING;
 	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
 
 	msg.index = HSS_CONFIG_RX_PCR;
-	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
+	msg.data32 &= ~(PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING);
+	if (port->clock_type & CLOCK_RX_INVERTED)
+		msg.data32 ^= PCR_DCLK_EDGE_RISING;
 	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");
 
 	memset(&msg, 0, sizeof(msg));
@@ -425,19 +673,22 @@
 	msg.cmd = PORT_CONFIG_WRITE;
 	msg.hss_port = port->id;
 	msg.index = HSS_CONFIG_TX_FCR;
-	msg.data16a = FRAME_OFFSET;
-	msg.data16b = FRAME_SIZE - 1;
+	msg.data16a = TX_FRAME_SYNC_OFFSET;
+	msg.data16b = port->frame_size - 1;
 	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");
 
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = PORT_CONFIG_WRITE;
 	msg.hss_port = port->id;
 	msg.index = HSS_CONFIG_RX_FCR;
-	msg.data16a = FRAME_OFFSET;
-	msg.data16b = FRAME_SIZE - 1;
+	msg.data16a = port->frame_sync_offset;
+	msg.data16b = port->frame_size - 1;
 	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");
+}
 
-	hss_config_set_lut(port);
+static void hss_config_load(struct port *port)
+{
+	struct msg msg;
 
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = PORT_CONFIG_LOAD;
@@ -456,7 +707,92 @@
 	npe_recv_message(port->npe, &msg, "FLUSH_IT");
 }
 
-static void hss_set_hdlc_cfg(struct port *port)
+static void hss_config(struct port *port)
+{
+	struct msg msg;
+	int started = port->chan_started;
+
+	if (started) {
+		hss_get_status(port);
+		memset(&msg, 0, sizeof(msg));
+		msg.hss_port = port->id;
+		msg.cmd = CHAN_FLOW_DISABLE;
+		hss_npe_send(port, &msg, "CHAN_FLOW_DISABLE");
+
+		/* HDLC mode configuration */
+		memset(&msg, 0, sizeof(msg));
+		msg.cmd = PKT_NUM_PIPES_WRITE;
+		msg.hss_port = port->id;
+		msg.data8a = PKT_NUM_PIPES;
+		hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");
+
+		msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
+		msg.data8a = PKT_PIPE_FIFO_SIZEW;
+		hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");
+
+		msg.cmd = PKT_PIPE_MODE_WRITE;
+		msg.data8a = NPE_PKT_MODE_HDLC;
+		/* msg.data8b = inv_mask */
+		/* msg.data8c = or_mask */
+		hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");
+
+		msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
+		msg.data16a = HDLC_MAX_MRU; /* including CRC */
+		hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");
+
+		msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
+		msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
+		hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
+
+		/* Channelized operation settings */
+		memset(&msg, 0, sizeof(msg));
+		msg.cmd = CHAN_TX_BLK_CFG_WRITE;
+		msg.hss_port = port->id;
+		msg.data8b = (CHAN_TX_LIST_FRAMES & ~7) / 2;
+		msg.data8a = msg.data8b / 4;
+		msg.data8d = CHAN_TX_LIST_FRAMES - msg.data8b;
+		msg.data8c = msg.data8d / 4;
+		hss_npe_send(port, &msg, "CHAN_TX_BLK_CFG_WRITE");
+
+		memset(&msg, 0, sizeof(msg));
+		msg.cmd = CHAN_RX_BUF_CFG_WRITE;
+		msg.hss_port = port->id;
+		msg.data8a = CHAN_RX_TRIGGER / 8;
+		msg.data8b = CHAN_RX_FRAMES;
+		hss_npe_send(port, &msg, "CHAN_RX_BUF_CFG_WRITE");
+
+		memset(&msg, 0, sizeof(msg));
+		msg.cmd = CHAN_TX_BUF_SIZE_WRITE;
+		msg.hss_port = port->id;
+		msg.data8a = CHAN_TX_LISTS;
+		hss_npe_send(port, &msg, "CHAN_TX_BUF_SIZE_WRITE");
+	}
+
+	hss_config_main(port);
+	hss_config_lut(port);
+	hss_config_load(port);
+
+	if (started) {
+		memset(&msg, 0, sizeof(msg));
+		msg.cmd = CHAN_RX_BUF_ADDR_WRITE;
+		msg.hss_port = port->id;
+		msg.data32 = port->chan_rx_buf_phys;
+		hss_npe_send(port, &msg, "CHAN_RX_BUF_ADDR_WRITE");
+
+		memset(&msg, 0, sizeof(msg));
+		msg.cmd = CHAN_TX_BUF_ADDR_WRITE;
+		msg.hss_port = port->id;
+		msg.data32 = chan_tx_lists_phys(port);
+		hss_npe_send(port, &msg, "CHAN_TX_BUF_ADDR_WRITE");
+
+		memset(&msg, 0, sizeof(msg));
+		msg.hss_port = port->id;
+		msg.cmd = CHAN_FLOW_ENABLE;
+		hss_npe_send(port, &msg, "CHAN_FLOW_ENABLE");
+	}
+}
+
+static void hss_config_hdlc(struct port *port)
 {
 	struct msg msg;
 
@@ -465,7 +801,7 @@
 	msg.hss_port = port->id;
 	msg.data8a = port->hdlc_cfg; /* rx_cfg */
 	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
-	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
+	hss_npe_send(port, &msg, "HSS_HDLC_CFG_WRITE");
 }
 
 static u32 hss_get_status(struct port *port)
@@ -482,10 +818,65 @@
 		BUG();
 	}
 
+	/*
+	  data8a: last RX error bitmap
+	  data8b: last RX error bitmap
+	  data8c: error count
+	  last error bitmap:
+	  - x3: 0 or 2 = no error, 1 = FRM sync error, 3 = overrun (6x = port)
+	  - 1C: 0 = no error, 4 = chan error, 8 = packet error (6x = port)
+	*/
+#if 0
+	printk(KERN_CRIT "HSS-%i: status RX %02X TX %02X error count %u\n", port->id,
+	       msg.data8a, msg.data8b, msg.data8c);
+#endif
 	return msg.data32;
 }
 
-static void hss_start_hdlc(struct port *port)
+static void hss_chan_start(struct port *port)
+{
+	struct msg msg;
+
+	port->chan_last_tx = 0;
+	port->chan_last_rx = 0;
+	port->chan_started = 1;
+	port->sync_counter = 0;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_RX_BUF_ADDR_WRITE;
+	msg.hss_port = port->id;
+	msg.data32 = port->chan_rx_buf_phys;
+	hss_npe_send(port, &msg, "CHAN_RX_BUF_ADDR_WRITE");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_TX_BUF_ADDR_WRITE;
+	msg.hss_port = port->id;
+	msg.data32 = chan_tx_lists_phys(port);
+	hss_npe_send(port, &msg, "CHAN_TX_BUF_ADDR_WRITE");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_FLOW_ENABLE;
+	msg.hss_port = port->id;
+	hss_npe_send(port, &msg, "CHAN_FLOW_ENABLE");
+}
+
+static void hss_chan_stop(struct port *port)
+{
+	struct msg msg;
+
+	if (!port->chan_started)
+		return;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_FLOW_DISABLE;
+	msg.hss_port = port->id;
+	hss_npe_send(port, &msg, "CHAN_FLOW_DISABLE");
+
+	hss_get_status(port); /* make sure it's halted */
+	port->chan_started = 0;
+}
+
+static void hss_hdlc_start(struct port *port)
 {
 	struct msg msg;
 
@@ -496,7 +887,7 @@
 	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
 }
 
-static void hss_stop_hdlc(struct port *port)
+static void hss_hdlc_stop(struct port *port)
 {
 	struct msg msg;
 
@@ -512,13 +903,16 @@
 	struct msg msg;
 	int err;
 
+	if ((err = mutex_lock_interruptible(&firmware_mutex)))
+		return err;
+
 	if (port->initialized)
-		return 0;
+		goto out;
 
 	if (!npe_running(port->npe) &&
 	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
 				     port->dev)))
-		return err;
+		goto out;
 
 	/* HDLC mode configuration */
 	memset(&msg, 0, sizeof(msg));
@@ -545,8 +939,33 @@
 	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
 	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
 
+	/* Channelized operation settings */
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_TX_BLK_CFG_WRITE;
+	msg.hss_port = port->id;
+	msg.data8b = (CHAN_TX_LIST_FRAMES & ~7) / 2;
+	msg.data8a = msg.data8b / 4;
+	msg.data8d = CHAN_TX_LIST_FRAMES - msg.data8b;
+	msg.data8c = msg.data8d / 4;
+	hss_npe_send(port, &msg, "CHAN_TX_BLK_CFG_WRITE");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_RX_BUF_CFG_WRITE;
+	msg.hss_port = port->id;
+	msg.data8a = CHAN_RX_TRIGGER / 8;
+	msg.data8b = CHAN_RX_FRAMES;
+	hss_npe_send(port, &msg, "CHAN_RX_BUF_CFG_WRITE");
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = CHAN_TX_BUF_SIZE_WRITE;
+	msg.hss_port = port->id;
+	msg.data8a = CHAN_TX_LISTS;
+	hss_npe_send(port, &msg, "CHAN_TX_BUF_SIZE_WRITE");
+
 	port->initialized = 1;
-	return 0;
+out:
+	mutex_unlock(&firmware_mutex);
+	return err;
 }
 
 /*****************************************************************************
@@ -622,20 +1041,22 @@
 }
 
 
-static void hss_hdlc_set_carrier(void *pdev, int carrier)
+static void __hss_hdlc_set_carrier(struct port *port)
 {
-	struct net_device *netdev = pdev;
-	struct port *port = dev_to_port(netdev);
+	if (port->loopback || port->carrier)
+		netif_carrier_on(port->netdev);
+	else
+		netif_carrier_off(port->netdev);
+}
+
+static void hss_hdlc_set_carrier_cb(void *pdev, int carrier)
+{
+	struct port *port = dev_to_port(pdev);
 	unsigned long flags;
 
 	spin_lock_irqsave(&npe_lock, flags);
 	port->carrier = carrier;
-	if (!port->loopback) {
-		if (carrier)
-			netif_carrier_on(netdev);
-		else
-			netif_carrier_off(netdev);
-	}
+	__hss_hdlc_set_carrier(port);
 	spin_unlock_irqrestore(&npe_lock, flags);
 }
 
@@ -918,6 +1339,8 @@
 {
 	int err;
 
+	might_sleep();
+
 	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
 				 "%s:RX-free", port->netdev->name);
 	if (err)
@@ -959,6 +1382,8 @@
 
 static void release_hdlc_queues(struct port *port)
 {
+	might_sleep();
+
 	qmgr_release_queue(queue_ids[port->id].rxfree);
 	qmgr_release_queue(queue_ids[port->id].rx);
 	qmgr_release_queue(queue_ids[port->id].txdone);
@@ -966,50 +1391,7 @@
 	qmgr_release_queue(port->plat->txreadyq);
 }
 
-static int init_hdlc_queues(struct port *port)
-{
-	int i;
-
-	if (!ports_open)
-		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
-						 POOL_ALLOC_SIZE, 32, 0)))
-			return -ENOMEM;
-
-	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
-					      &port->desc_tab_phys)))
-		return -ENOMEM;
-	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
-	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
-	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
-
-	/* Setup RX buffers */
-	for (i = 0; i < RX_DESCS; i++) {
-		struct desc *desc = rx_desc_ptr(port, i);
-		buffer_t *buff;
-		void *data;
-#ifdef __ARMEB__
-		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
-			return -ENOMEM;
-		data = buff->data;
-#else
-		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
-			return -ENOMEM;
-		data = buff;
-#endif
-		desc->buf_len = RX_SIZE;
-		desc->data = dma_map_single(&port->netdev->dev, data,
-					    RX_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
-			free_buffer(buff);
-			return -EIO;
-		}
-		port->rx_buff_tab[i] = buff;
-	}
-
-	return 0;
-}
-
-static void destroy_hdlc_queues(struct port *port)
+static void destroy_hdlc_buffs(struct port *port)
 {
 	int i;
 
@@ -1036,36 +1418,111 @@
 		port->desc_tab = NULL;
 	}
 
-	if (!ports_open && dma_pool) {
+	if (!dma_pool_use_count && dma_pool) {
 		dma_pool_destroy(dma_pool);
 		dma_pool = NULL;
 	}
 }
 
+static int init_hdlc_buffs(struct port *port)
+{
+	int i, err = -ENOMEM;
+
+	if (!dma_pool_use_count)
+		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
+						 POOL_ALLOC_SIZE, 32, 0)))
+			return -ENOMEM;
+
+	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+					      &port->desc_tab_phys)))
+		goto rel_dma_pool;
+	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
+	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
+	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
+
+	/* Setup RX buffers */
+	for (i = 0; i < RX_DESCS; i++) {
+		struct desc *desc = rx_desc_ptr(port, i);
+		buffer_t *buff;
+		void *data;
+#ifdef __ARMEB__
+		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
+			goto rel_queues;
+		data = buff->data;
+#else
+		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
+			goto rel_queues;
+		data = buff;
+#endif
+		desc->buf_len = RX_SIZE;
+		desc->data = dma_map_single(&port->netdev->dev, data,
+					    RX_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
+			free_buffer(buff);
+			err = -EIO;
+			goto rel_queues;
+		}
+		port->rx_buff_tab[i] = buff;
+	}
+
+	dma_pool_use_count++;
+	return 0;
+
+rel_queues:
+	destroy_hdlc_buffs(port);
+rel_dma_pool:
+	dma_pool_destroy(dma_pool);
+	return err;
+}
+
+static void hss_port_open(struct port *port)
+{
+	might_sleep();
+
+	if (!port->port_open_count++ && port->plat->open)
+		port->plat->open(port->id, port->netdev, hss_hdlc_set_carrier_cb);
+}
+
+static void hss_port_close(struct port *port)
+{
+	might_sleep();
+
+	if (!--port->port_open_count && port->plat->close)
+		port->plat->close(port->id, port->netdev);
+}
+
 static int hss_hdlc_open(struct net_device *dev)
 {
 	struct port *port = dev_to_port(dev);
-	unsigned long flags;
 	int i, err = 0;
 
+	if (port->mode == MODE_G704 && port->channels[0] == CHANNEL_HDLC)
+		return -EBUSY; /* channel #0 is used for G.704 framing */
+
+	if (port->mode != MODE_HDLC)
+		for (i = port->frame_size / 8; i < MAX_CHANNELS; i++)
+			if (port->channels[i] == CHANNEL_HDLC)
+				return -ECHRNG; /* frame too short */
+
+	if ((err = hss_load_firmware(port)))
+		return err;
+
 	if ((err = hdlc_open(dev)))
 		return err;
 
-	if ((err = hss_load_firmware(port)))
+	if ((err = request_hdlc_queues(port))) {
+		printk(KERN_INFO "HSS-%i: Unable to request QMgr HDLC queues\n", port->id);
 		goto err_hdlc_close;
+	}
 
-	if ((err = request_hdlc_queues(port)))
-		goto err_hdlc_close;
-
-	if ((err = init_hdlc_queues(port)))
+	if ((err = init_hdlc_buffs(port)))
 		goto err_destroy_queues;
 
-	spin_lock_irqsave(&npe_lock, flags);
-	if (port->plat->open)
-		if ((err = port->plat->open(port->id, dev,
-					    hss_hdlc_set_carrier)))
-			goto err_unlock;
-	spin_unlock_irqrestore(&npe_lock, flags);
+	if (port->mode == MODE_G704)
+		if ((err = hss_chan_open(port)))
+			goto free_buffs;
+
+	hss_port_open(port);
 
 	/* Populate queues with buffers, no failure after this point */
 	for (i = 0; i < TX_DESCS; i++)
@@ -1086,21 +1543,28 @@
 		     hss_hdlc_txdone_irq, dev);
 	qmgr_enable_irq(queue_ids[port->id].txdone);
 
-	ports_open++;
+	dma_pool_use_count++;
+	port->hdlc_open = 1;
 
-	hss_set_hdlc_cfg(port);
+	spin_lock_irq(&npe_lock);
+	hss_config_hdlc(port);
 	hss_config(port);
 
-	hss_start_hdlc(port);
+	if (port->mode == MODE_G704)
+		hss_chan_start(port);
+
+	hss_hdlc_start(port);
+	port->carrier = port->plat->get_carrier ? port->plat->get_carrier(port->id) : 1;
+	__hss_hdlc_set_carrier(port);
+	spin_unlock_irq(&npe_lock);
 
 	/* we may already have RX data, enables IRQ */
 	napi_schedule(&port->napi);
 	return 0;
 
-err_unlock:
-	spin_unlock_irqrestore(&npe_lock, flags);
+free_buffs:
+	destroy_hdlc_buffs(port);
 err_destroy_queues:
-	destroy_hdlc_queues(port);
 	release_hdlc_queues(port);
 err_hdlc_close:
 	hdlc_close(dev);
@@ -1110,16 +1574,19 @@
 static int hss_hdlc_close(struct net_device *dev)
 {
 	struct port *port = dev_to_port(dev);
-	unsigned long flags;
 	int i, buffs = RX_DESCS; /* allocated RX buffers */
 
-	spin_lock_irqsave(&npe_lock, flags);
-	ports_open--;
+	dma_pool_use_count--;
+	port->hdlc_open = 0;
 	qmgr_disable_irq(queue_ids[port->id].rx);
 	netif_stop_queue(dev);
 	napi_disable(&port->napi);
 
-	hss_stop_hdlc(port);
+	if (port->mode == MODE_G704)
+		hss_chan_close(port);
+
+	spin_lock_irq(&npe_lock);
+	hss_hdlc_stop(port);
 
 	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
 		buffs--;
@@ -1151,12 +1618,11 @@
 #endif
 	qmgr_disable_irq(queue_ids[port->id].txdone);
 
-	if (port->plat->close)
-		port->plat->close(port->id, dev);
-	spin_unlock_irqrestore(&npe_lock, flags);
+	spin_unlock_irq(&npe_lock);
 
-	destroy_hdlc_queues(port);
+	destroy_hdlc_buffs(port);
 	release_hdlc_queues(port);
+	hss_port_close(port);
 	hdlc_close(dev);
 	return 0;
 }
@@ -1247,7 +1713,6 @@
 	sync_serial_settings new_line;
 	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
 	struct port *port = dev_to_port(dev);
-	unsigned long flags;
 	int clk;
 
 	if (cmd != SIOCWANDEV)
@@ -1266,6 +1731,32 @@
 		new_line.loopback = port->loopback;
 		if (copy_to_user(line, &new_line, size))
 			return -EFAULT;
+
+#if 0
+		if (!port->chan_buf)
+			return 0;
+
+		dma_sync_single_for_cpu(&dev->dev, port->chan_rx_buf_phys,
+				chan_rx_buf_len(port), DMA_FROM_DEVICE);
+		printk(KERN_DEBUG "RX:\n");
+		int i;
+		for (i = 0; i < chan_rx_buf_len(port); i++) {
+			if (i % 32 == 0)
+				printk(KERN_DEBUG "%03X ", i);
+			printk("%02X%c", chan_rx_buf(port)[i],
+			       (i + 1) % 32 ? ' ' : '\n');
+		}
+
+		printk(KERN_DEBUG "TX:\n");
+		for (i = 0; i < /*CHAN_TX_FRAMES * 2*/ chan_tx_buf_len(port)
+			     + chan_tx_lists_len(port); i++) {
+			if (i % 32 == 0)
+				printk(KERN_DEBUG "%03X ", i);
+			printk("%02X%c", chan_tx_buf(port)[i],
+			       (i + 1) % 32 ? ' ' : '\n');
+		}
+		port->msg_count = 10;
+#endif
 		return 0;
 
 	case IF_IFACE_SYNC_SERIAL:
@@ -1279,32 +1770,32 @@
 		if (port->plat->set_clock)
 			clk = port->plat->set_clock(port->id, clk);
 
-		if (clk != CLOCK_EXT && clk != CLOCK_INT)
+		if ((clk & ~(CLOCK_RX_INVERTED |
+			     CLOCK_TX_INVERTED)) != CLOCK_EXT &&
+		    (clk & ~(CLOCK_RX_INVERTED |
+			     CLOCK_TX_INVERTED)) != CLOCK_INT)
 			return -EINVAL;	/* No such clock setting */
 
 		if (new_line.loopback != 0 && new_line.loopback != 1)
 			return -EINVAL;
 
+		spin_lock_irq(&npe_lock);
 		port->clock_type = clk; /* Update settings */
-		if (clk == CLOCK_INT)
+		if ((clk & CLOCK_TYPE_MASK) == CLOCK_INT)
 			find_best_clock(new_line.clock_rate, &port->clock_rate,
 					&port->clock_reg);
 		else {
 			port->clock_rate = 0;
 			port->clock_reg = CLK42X_SPEED_2048KHZ;
 		}
+
 		port->loopback = new_line.loopback;
 
-		spin_lock_irqsave(&npe_lock, flags);
-
-		if (dev->flags & IFF_UP)
+		if (port->port_open_count)
 			hss_config(port);
 
-		if (port->loopback || port->carrier)
-			netif_carrier_on(port->netdev);
-		else
-			netif_carrier_off(port->netdev);
-		spin_unlock_irqrestore(&npe_lock, flags);
+		__hss_hdlc_set_carrier(port);
+		spin_unlock_irq(&npe_lock);
 
 		return 0;
 
@@ -1314,6 +1805,1065 @@
 }
 
 /*****************************************************************************
+ * channelized (G.704) operation
+ ****************************************************************************/
+
+static void g704_rx_framer_debug(struct port *port, u8 *data, unsigned int offset)
+{
+#if DEBUG_FRAMER
+	int i;
+	printk(KERN_DEBUG "HSS-%u %u (%u %u)\n", port->id, port->frame_sync_offset,
+	       offset, port->sync_counter);
+	printk(KERN_DEBUG);
+	for (i = 0; i < CHAN_RX_FRAMES; i++)
+		printk(" %02X", chan_rx_buf(port)[i]);
+	printk("\n");
+#endif
+}
+
+static void g704_rx_framer(struct port *port, unsigned int offset)
+{
+	u8 *data = chan_rx_buf(port) + sub_offset(offset, CHAN_RX_TRIGGER,
+						  CHAN_RX_FRAMES);
+	unsigned int bit, frame, cnt, aligned = 0;
+	u8 zeros_even, zeros_odd, ones_even, ones_odd, good = 0;
+
+	if (port->sync_counter < 10000)
+		port->sync_counter++;
+
+	/* discard the first frame set after changing the offset,
+	   the offset used there is unknown */
+	if (port->sync_counter == 1)
+		return;
+
+	dma_sync_single_for_cpu(port->dev, port->chan_rx_buf_phys,
+				CHAN_RX_FRAMES, DMA_FROM_DEVICE);
+
+	/* check if aligned first */
+	for (frame = 0; frame < CHAN_RX_TRIGGER; frame += 2) {
+		u8 ve = data[frame];
+		u8 vo = data[frame + 1];
+
+		if (((ve & 0x7F) == 0x1B && (vo & 0x40)) ||
+		    ((vo & 0x7F) == 0x1B && (ve & 0x40)))
+			good++;
+	}
+
+	if (good >= 3)
+		aligned = 1;
+
+#if DEBUG_FRAMER
+	if ((port->aligned && good != CHAN_RX_TRIGGER / 2) ||
+	    (!port->aligned && good))
+		g704_rx_framer_debug(port, data, offset);
+#endif
+
+	if (aligned) {
+		if (port->aligned)
+			goto out; /* no change */
+		if (printk_ratelimit())
+			printk(KERN_INFO "HSS-%i: synchronized at %u\n", port->id,
+			       port->frame_sync_offset);
+		g704_rx_framer_debug(port, data, offset);
+		port->aligned = 1;
+
+		atomic_inc(&port->chan_tx_irq_number);
+		wake_up_interruptible(&port->chan_tx_waitq);
+		atomic_inc(&port->chan_rx_irq_number);
+		wake_up_interruptible(&port->chan_rx_waitq);
+		goto out;
+	}
+
+	if (port->sync_counter < 4)
+		goto out;
+
+	/* not aligned: always update state; rate-limit only the message */
+	if (port->aligned) {
+		if (printk_ratelimit())
+			printk(KERN_INFO "HSS-%i: lost alignment\n", port->id);
+		port->aligned = 0;
+		g704_rx_framer_debug(port, data, offset);
+		for (cnt = 0; cnt < MAX_CHAN_DEVICES; cnt++)
+			if (port->chan_devices[cnt]) {
+				set_bit(TX_ERROR_BIT, &port->chan_devices[cnt]->errors_bitmap);
+				set_bit(RX_ERROR_BIT, &port->chan_devices[cnt]->errors_bitmap);
+			}
+		atomic_inc(&port->chan_tx_irq_number);
+		wake_up_interruptible(&port->chan_tx_waitq);
+		atomic_inc(&port->chan_rx_irq_number);
+		wake_up_interruptible(&port->chan_rx_waitq);
+	}
+
+	zeros_even = zeros_odd = 0;
+	ones_even = ones_odd = 0xFF;
+	for (frame = 0; frame < CHAN_RX_TRIGGER; frame += 2) {
+		zeros_even |= data[frame];
+		zeros_odd |= data[frame + 1];
+		ones_even &= data[frame];
+		ones_odd &= data[frame + 1];
+	}
+
+	for (bit = 0; bit < 7; bit++) {
+		if ((zeros_even & ~0x9B) == 0 && (ones_even & 0x1B) == 0x1B &&
+		    (ones_odd & 0x40) == 0x40)
+			break;
+		if ((zeros_odd & ~0x9B) == 0 && (ones_odd & 0x1B) == 0x1B &&
+		    (ones_even & 0x40) == 0x40)
+			break;
+		zeros_even <<= 1;
+		ones_even = ones_even << 1 | 1;
+		zeros_odd <<= 1;
+		ones_odd = ones_odd << 1 | 1;
+	}
+
+	bit = 1; /* FIXME(review): overrides search result above — confirm intent */
+	port->frame_sync_offset += port->frame_size - bit;
+	port->frame_sync_offset %= port->frame_size;
+
+#if DEBUG_FRAMER
+	if (bit == 7)
+		printk(KERN_DEBUG "HSS-%i: trying frame sync at %u\n",
+		       port->id, port->frame_sync_offset);
+	else
+		printk(KERN_DEBUG "HSS-%i: found possible frame sync pattern at %u\n",
+		       port->id, port->frame_sync_offset);
+#endif
+
+	if (!bit)
+		goto out; /* possible change in sync frame order */
+
+	hss_config_main(port);
+	hss_config_load(port);
+	port->sync_counter = 0;
+out:
+	dma_sync_single_for_device(port->dev, port->chan_rx_buf_phys,
+				   CHAN_RX_FRAMES, DMA_FROM_DEVICE);
+}
+
+static void chan_process_tx_irq(struct chan_device *chan_dev, int offset)
+{
+	/* in bytes */
+	unsigned int buff_len = CHAN_TX_FRAMES * chan_dev->chan_count;
+	unsigned int list_len = CHAN_TX_LIST_FRAMES * chan_dev->chan_count;
+	int eaten, last_offset = chan_dev->port->chan_last_tx * list_len;
+
+	offset *= list_len;
+	eaten = sub_offset(offset, last_offset, buff_len);
+
+	if (chan_dev->tx_count > eaten + 2 * list_len) {
+		/* two pages must be reserved for the transmitter */
+		chan_dev->tx_first += eaten;
+		chan_dev->tx_first %= buff_len;
+		chan_dev->tx_count -= eaten;
+	} else {
+		/* FIXME check
+		   0
+		   1 tx_first (may still be transmitted)
+		   2 tx_offset (currently reported by the NPE)
+		   3 tx_first + 2 * list_len (free to write here)
+		   4
+		   5
+		*/
+
+		/* printk(KERN_DEBUG "TX buffer underflow\n"); */
+		chan_dev->tx_first = sub_offset(offset, list_len, buff_len);
+		chan_dev->tx_count = 2 * list_len; /* reserve */
+		set_bit(TX_ERROR_BIT, &chan_dev->errors_bitmap);
+	}
+}
+
+static void chan_process_rx_irq(struct chan_device *chan_dev, int offset)
+{
+	/* in bytes */
+	unsigned int buff_len = CHAN_RX_FRAMES * chan_dev->chan_count;
+	unsigned int trig_len = CHAN_RX_TRIGGER * chan_dev->chan_count;
+	int last_offset = chan_dev->port->chan_last_rx * chan_dev->chan_count;
+
+	offset *= chan_dev->chan_count;
+	chan_dev->rx_count += sub_offset(offset, last_offset + trig_len,
+					 buff_len) + trig_len;
+	if (chan_dev->rx_count > buff_len - 2 * trig_len) {
+		/* two pages - offset[0] and offset[1] are lost - FIXME check */
+		/* printk(KERN_DEBUG "RX buffer overflow\n"); */
+		chan_dev->rx_first = (offset + 2 * trig_len) % buff_len;
+		chan_dev->rx_count = buff_len - 2 * trig_len;
+		set_bit(RX_ERROR_BIT, &chan_dev->errors_bitmap);
+	}
+}
+
+static void hss_chan_irq(void *pdev)
+{
+	struct port *port = pdev;
+	u32 v;
+
+#if DEBUG_RX
+	printk(KERN_DEBUG DRV_NAME ": hss_chan_irq\n");
+#endif
+	spin_lock(&npe_lock);
+	while ((v = qmgr_get_entry(queue_ids[port->id].chan))) {
+		unsigned int first, errors, tx_list, rx_frame;
+		int i, bad;
+
+		first = v >> 24;
+		errors = (v >> 16) & 0xFF;
+		tx_list = (v >> 8) & 0xFF;
+		rx_frame = v & 0xFF;
+
+		if (port->msg_count) {
+			printk(KERN_DEBUG "chan_irq hss %i jiffies %lu first"
+			       " 0x%02X errors 0x%02X tx_list 0x%02X rx_frame"
+			       " 0x%02X\n", port->id, jiffies, first, errors,
+			       tx_list, rx_frame);
+			port->msg_count--;
+		}
+
+		BUG_ON(rx_frame % CHAN_RX_TRIGGER);
+		BUG_ON(rx_frame >= CHAN_RX_FRAMES);
+		BUG_ON(tx_list >= CHAN_TX_LISTS);
+
+		bad = port->mode == MODE_G704 && !port->aligned;
+		if (!bad && tx_list != port->chan_last_tx) {
+			if (tx_list != (port->chan_last_tx + 1) % CHAN_TX_LISTS)
+				printk(KERN_DEBUG "HSS-%u: skipped IRQ: Tx last %i current %i\n",
+				       port->id, port->chan_last_tx, tx_list);
+			for (i = 0; i < MAX_CHAN_DEVICES; i++) {
+				if (!port->chan_devices[i] ||
+				    !port->chan_devices[i]->open_count)
+					continue;
+				chan_process_tx_irq(port->chan_devices[i], tx_list);
+			}
+			atomic_inc(&port->chan_tx_irq_number);
+#if 0
+			printk(KERN_DEBUG "wakeing up TX jiff %lu\n",
+			       jiffies, errors);
+#endif
+			wake_up_interruptible(&port->chan_tx_waitq);
+		}
+
+		if (rx_frame != (port->chan_last_rx + CHAN_RX_TRIGGER) % CHAN_RX_FRAMES)
+			printk(KERN_DEBUG "HSS-%u: skipped IRQ: Rx last %i current %i\n",
+			       port->id, port->chan_last_rx, rx_frame);
+
+		if (port->mode == MODE_G704)
+			g704_rx_framer(port, rx_frame);
+
+		if (!bad && (port->mode != MODE_G704 || port->aligned)) {
+			for (i = 0; i < MAX_CHAN_DEVICES; i++) {
+				if (!port->chan_devices[i] ||
+				    !port->chan_devices[i]->open_count)
+					continue;
+				chan_process_rx_irq(port->chan_devices[i], rx_frame);
+			}
+			atomic_inc(&port->chan_rx_irq_number);
+			wake_up_interruptible(&port->chan_rx_waitq);
+		}
+		port->chan_last_tx = tx_list;
+		port->chan_last_rx = rx_frame;
+	}
+	spin_unlock(&npe_lock);
+}
+
+
+static int hss_chan_open(struct port *port)
+{
+	int err;
+
+	might_sleep();
+	if (port->chan_open_count++)
+		return 0;   /* channelized mode already initialized */
+
+	if ((err = qmgr_request_queue(queue_ids[port->id].chan, CHAN_QUEUE_LEN,
+				      0, 0, "hss%i:chan", port->id)))
+		goto dec_count;
+
+	if (!(port->chan_buf = kmalloc(chan_tx_buf_len(port) +
+				       chan_tx_lists_len(port) +
+				       chan_rx_buf_len(port), GFP_KERNEL))) {
+		err = -ENOBUFS; /* must be set BEFORE the jump (was dead code) */
+		goto release_queue;
+	}
+
+	port->chan_tx_buf_phys = dma_map_single(port->dev, chan_tx_buf(port),
+						chan_tx_buf_len(port) +
+						chan_tx_lists_len(port),
+						DMA_TO_DEVICE);
+	if (dma_mapping_error(port->dev, port->chan_tx_buf_phys)) {
+		err = -EIO;
+		goto free;
+	}
+
+	port->chan_rx_buf_phys = dma_map_single(port->dev, chan_rx_buf(port),
+						chan_rx_buf_len(port),
+						DMA_FROM_DEVICE);
+	if (dma_mapping_error(port->dev, port->chan_rx_buf_phys)) {
+		err = -EIO;
+		goto unmap_tx;
+	}
+
+	qmgr_set_irq(queue_ids[port->id].chan, QUEUE_IRQ_SRC_NOT_EMPTY,
+		     hss_chan_irq, port);
+	qmgr_enable_irq(queue_ids[port->id].chan);
+	return 0;
+unmap_tx:
+	dma_unmap_single(port->dev, port->chan_tx_buf_phys,
+			 chan_tx_buf_len(port) + chan_tx_lists_len(port),
+			 DMA_TO_DEVICE);
+free:
+	kfree(port->chan_buf);
+	port->chan_buf = NULL;
+release_queue:
+	qmgr_release_queue(queue_ids[port->id].chan);
+dec_count:
+	port->chan_open_count--; /* failed: undo the provisional increment */
+	return err;
+}
+
+void hss_chan_close(struct port *port)
+{
+	might_sleep();
+
+	if (--port->chan_open_count)
+		return;		/* channelized mode already stopped */
+
+	hss_chan_stop(port);
+
+	qmgr_disable_irq(queue_ids[port->id].chan);
+
+	dma_unmap_single(port->dev, port->chan_tx_buf_phys,
+			 chan_tx_buf_len(port) + chan_tx_lists_len(port),
+			 DMA_TO_DEVICE); /* TX data buffer and TX lists share one mapping */
+	dma_unmap_single(port->dev, port->chan_rx_buf_phys,
+			 chan_rx_buf_len(port), DMA_FROM_DEVICE);
+	kfree(port->chan_buf);
+	port->chan_buf = NULL; /* guard against stale pointer on the next open */
+	while (qmgr_get_entry(queue_ids[port->id].chan))
+		; /* drain all entries */
+	qmgr_release_queue(queue_ids[port->id].chan);
+}
+
+static int hss_char_open(struct inode *inode, struct file *file)
+{
+	struct chan_device *chan_dev = inode_to_chan_dev(inode);
+	struct port *port = chan_dev->port;
+	int i, err = 0;
+
+	if ((err = hss_load_firmware(port)))
+		return err;
+
+	rtnl_lock();
+	if (port->mode == MODE_HDLC) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (port->mode == MODE_G704 && port->channels[0] == chan_dev->id) {
+		err = -EBUSY; /* channel #0 is used for G.704 signaling */
+		goto out;
+	}
+
+	for (i = MAX_CHANNELS; i > port->frame_size / 8; i--)
+		if (port->channels[i - 1] == chan_dev->id) {
+			err = -ECHRNG; /* frame too short */
+			goto out;
+		}
+
+	if (chan_dev->open_count && (chan_dev->excl_open || (file->f_flags & O_EXCL))) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if ((err = hss_chan_open(port)))
+		goto out;
+
+	spin_lock_irq(&npe_lock);
+
+	if (chan_dev->open_count) {
+		chan_dev->open_count++;
+		goto out_unlock;
+	}
+
+	chan_dev->rx_first = chan_dev->tx_first = 0;
+	chan_dev->rx_count = chan_dev->tx_count = 0;
+	clear_bit(TX_ERROR_BIT, &chan_dev->errors_bitmap);
+	clear_bit(RX_ERROR_BIT, &chan_dev->errors_bitmap);
+
+	hss_chan_stop(port);
+	chan_dev->open_count++;
+	chan_dev->excl_open = !!(file->f_flags & O_EXCL);
+
+	hss_config(port);
+	hss_chan_start(port);
+
+out_unlock:
+	spin_unlock_irq(&npe_lock);
+	if (!err)
+		hss_port_open(port);
+out:
+	rtnl_unlock();
+	return err;
+}
+
+static int hss_char_close(struct inode *inode, struct file *file)
+{
+	struct chan_device *chan_dev = inode_to_chan_dev(inode);
+	struct port *port = chan_dev->port;
+
+	rtnl_lock();
+	spin_lock_irq(&npe_lock);
+
+	if (--chan_dev->open_count) { /* other users remain: restart with new config */
+		hss_chan_stop(port);
+		hss_config(port);
+		hss_chan_start(port);
+	}
+
+	spin_unlock_irq(&npe_lock);
+
+	if (!chan_dev->open_count) { /* last user gone: teardown may sleep, so outside the spinlock */
+		hss_chan_close(port);
+		hss_port_close(port);
+	}
+	rtnl_unlock();
+	return 0;
+}
+
+static ssize_t hss_char_read(struct file *file, char __user *buf, size_t count,
+			     loff_t *f_pos)
+{
+	struct chan_device *chan_dev = inode_to_chan_dev(file->f_path.dentry->d_inode);
+	struct port *port = chan_dev->port;
+	int res = 0, prev_irq, loops = 0;
+
+	/* wait for data */
+	while (1) {
+#if 0
+		if (test_and_clear_bit(RX_ERROR_BIT, &chan_dev->errors_bitmap))
+			return -EIO;
+#endif
+		if (count == 0)
+			return 0; /* no data requested */
+
+		prev_irq = atomic_read(&port->chan_rx_irq_number);
+
+		spin_lock_irq(&npe_lock);
+		if (chan_dev->rx_count) {
+			u8 *rx_buf = chan_rx_buf(port), *output;
+			if (count > chan_dev->rx_count)
+				count = chan_dev->rx_count;
+#if 0
+			if (loops > 1)
+				printk(KERN_DEBUG "ENTRY rx_first %u rx_count %u count %i"
+				       " last_rx %u loops %i\n", chan_dev->rx_first,
+				       chan_dev->rx_count, count, port->chan_last_rx, loops);
+#endif
+			if (!(output = kmalloc(count, GFP_ATOMIC))) {
+				spin_unlock_irq(&npe_lock);
+				return -ENOMEM;
+			}
+
+			dma_sync_single_for_cpu(port->dev, port->chan_rx_buf_phys,
+						chan_rx_buf_len(port), DMA_FROM_DEVICE);
+
+			for (res = 0; res < count; res++) {
+				unsigned int chan = chan_dev->rx_first % chan_dev->chan_count;
+				unsigned int frame = chan_dev->rx_first / chan_dev->chan_count;
+
+				chan = chan_dev->log_channels[chan];
+				output[res] = rx_buf[chan * CHAN_RX_FRAMES + frame];
+				chan_dev->rx_first++;
+				chan_dev->rx_first %= CHAN_RX_FRAMES * chan_dev->chan_count;
+				chan_dev->rx_count--;
+			}
+			dma_sync_single_for_device(port->dev, port->chan_rx_buf_phys,
+						   chan_rx_buf_len(port), DMA_FROM_DEVICE);
+			spin_unlock_irq(&npe_lock);
+#if 0
+			printk(KERN_DEBUG "EXIT  rx_first %u rx_count %u res %i\n",
+			       chan_dev->rx_first, chan_dev->rx_count, res);
+#endif
+			if (copy_to_user(buf, output, count))
+				res = -EFAULT;
+			kfree(output);
+			return res;
+		}
+		spin_unlock_irq(&npe_lock);
+		loops++;
+		if (wait_event_interruptible(port->chan_rx_waitq,
+					     atomic_read(&port->chan_rx_irq_number) != prev_irq))
+			return -ERESTARTSYS;
+	}
+}
+
+static ssize_t hss_char_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *f_pos)
+{
+	struct chan_device *chan_dev = inode_to_chan_dev(file->f_path.dentry->d_inode);
+	struct port *port = chan_dev->port;
+	int res = 0, prev_irq, loops = 0;
+
+	/* wait for room */
+	while (1) {
+#if 0
+		if (test_and_clear_bit(TX_ERROR_BIT, &chan_dev->errors_bitmap))
+			return -EIO;
+#endif
+		if (count == 0)
+			return 0; /* no data to send */
+
+		prev_irq = atomic_read(&port->chan_tx_irq_number);
+
+		spin_lock_irq(&npe_lock);
+		if (chan_dev->tx_count < CHAN_TX_FRAMES * chan_dev->chan_count) {
+			u8 *tx_buf = chan_tx_buf(port), *input;
+			if (count > CHAN_TX_FRAMES * chan_dev->chan_count)
+				count = CHAN_TX_FRAMES * chan_dev->chan_count;
+#if 0
+			if (loops > 1)
+				printk(KERN_DEBUG "ENTRY TX_first %u tx_count %u count %i"
+				       " last_tx %u loops %i\n", chan_dev->tx_first,
+				       chan_dev->tx_count, count, port->chan_last_tx, loops);
+#endif
+			if (!(input = kmalloc(count, GFP_ATOMIC))) {
+				spin_unlock_irq(&npe_lock);
+				return -ENOMEM;
+			}
+
+			dma_sync_single_for_cpu(port->dev, port->chan_tx_buf_phys,
+						chan_tx_buf_len(port), DMA_TO_DEVICE);
+
+			if (copy_from_user(input, buf, count))
+				res = -EFAULT;
+			else
+				for (res = 0; res < count; res++) {
+					unsigned int tail, chan, frame;
+
+					tail = (chan_dev->tx_first + chan_dev->tx_count) %
+						(CHAN_TX_FRAMES * chan_dev->chan_count);
+					chan = tail % chan_dev->chan_count;
+					frame = tail / chan_dev->chan_count;
+					chan = chan_dev->log_channels[chan];
+
+					tx_buf[chan * CHAN_TX_FRAMES + frame] = input[res];
+					chan_dev->tx_count++;
+				}
+			dma_sync_single_for_device(port->dev, port->chan_tx_buf_phys,
+						   chan_tx_buf_len(port), DMA_TO_DEVICE);
+			spin_unlock_irq(&npe_lock);
+#if 0
+			printk(KERN_DEBUG "EXIT  TX_first %u tx_count %u res %i\n",
+			       chan_dev->tx_first, chan_dev->tx_count, res);
+#endif
+			kfree(input);
+			return res;
+		}
+		spin_unlock_irq(&npe_lock);
+		loops++;
+		if (wait_event_interruptible(port->chan_tx_waitq,
+					     atomic_read(&port->chan_tx_irq_number) != prev_irq))
+			return -ERESTARTSYS;
+	}
+}
+
+
+static unsigned int hss_char_poll(struct file *file, poll_table *wait)
+{
+	struct chan_device *chan_dev = inode_to_chan_dev
+		(file->f_path.dentry->d_inode);
+	struct port *port = chan_dev->port;
+	unsigned int mask = 0;
+
+	spin_lock_irq(&npe_lock);
+	poll_wait(file, &port->chan_tx_waitq, wait);
+	poll_wait(file, &port->chan_rx_waitq, wait);
+
+	if (chan_dev->tx_count < CHAN_TX_FRAMES * chan_dev->chan_count) /* TX ring has room */
+		mask |= POLLOUT | POLLWRNORM;
+	if (chan_dev->rx_count) /* RX data pending */
+		mask |= POLLIN | POLLRDNORM;
+	spin_unlock_irq(&npe_lock);
+	return mask;
+}
+
+/*****************************************************************************
+ * channelized device sysfs attributes
+ ****************************************************************************/
+
+static ssize_t chan_show_chan(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	int ret;
+	struct chan_device *chan_dev = dev_get_drvdata(dev);
+
+	rtnl_lock();
+	ret = print_channels(chan_dev->port, buf, chan_dev->id);
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t chan_set_chan(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct chan_device *chan_dev = dev_get_drvdata(dev);
+	struct port *port = chan_dev->port;
+	int ret = len;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	if (len != 7 || memcmp(buf, "destroy", 7))
+		return -EINVAL;
+
+	rtnl_lock();
+	if (chan_dev->open_count)
+		ret = -EBUSY;
+	else {
+		unsigned int ch;
+		cdev_del(&chan_dev->cdev);
+
+		for (ch = 0; ch < MAX_CHANNELS; ch++)
+			if (port->channels[ch] == chan_dev->id)
+				port->channels[ch] = CHANNEL_UNUSED;
+		port->chan_devices[chan_dev->id] = NULL;
+		kfree(chan_dev);
+		BUG_ON(device_schedule_callback(dev, device_unregister));
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static struct device_attribute chan_attr =
+	__ATTR(channels, 0644, chan_show_chan, chan_set_chan);
+
+/*****************************************************************************
+ * main sysfs attributes
+ ****************************************************************************/
+
+static const struct file_operations chan_fops = {
+	.owner   = THIS_MODULE,
+	.llseek  = no_llseek,
+	.read    = hss_char_read,
+	.write   = hss_char_write,
+	.poll    = hss_char_poll,
+	.open    = hss_char_open,
+	.release = hss_char_close,
+};
+
+static ssize_t create_chan(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	struct chan_device *chan_dev;
+	u8 channels[MAX_CHANNELS];
+	size_t orig_len = len;
+	unsigned int ch, id, first_channel;
+	int minor, err;
+
+	if ((err = parse_channels(&buf, &len, channels)) < 1)
+		return err;
+
+	if (!(chan_dev = kzalloc(sizeof(struct chan_device), GFP_KERNEL)))
+		return -ENOBUFS;
+
+	rtnl_lock();
+
+	if (port->mode != MODE_RAW && port->mode != MODE_G704) {
+		err = -EINVAL;
+		goto free;
+	}
+
+	for (ch = 0; ch < MAX_CHANNELS; ch++)
+		if (channels[ch] && port->channels[ch] != CHANNEL_UNUSED) {
+			printk(KERN_DEBUG "Channel #%i already in use\n", ch);
+			err = -EBUSY;
+			goto free;
+		}
+
+	for (id = 0; id < MAX_CHAN_DEVICES; id++)
+		if (port->chan_devices[id] == NULL)
+			break;
+
+	if (id == MAX_CHAN_DEVICES) {
+		err = -EBUSY;
+		goto free;
+	}
+
+	for (first_channel = 0; first_channel < MAX_CHANNELS; first_channel++)
+		if (channels[first_channel])
+			break;
+
+	minor = port->id * MAX_CHAN_DEVICES + first_channel;
+	chan_dev->id = id;
+	chan_dev->port = port;
+	cdev_init(&chan_dev->cdev, &chan_fops);
+	chan_dev->cdev.owner = THIS_MODULE;
+	if ((err = cdev_add(&chan_dev->cdev, MKDEV(chan_major, minor), 1)))
+		goto free;
+
+	spin_lock_irq(&npe_lock);
+	for (ch = first_channel; ch < MAX_CHANNELS; ch++)
+		if (channels[ch])
+			port->channels[ch] = id;
+	port->chan_devices[id] = chan_dev;
+	spin_unlock_irq(&npe_lock);
+
+	chan_dev->dev = device_create(hss_class, dev, MKDEV(chan_major, minor),
+				      chan_dev, "hss%uch%u", port->id, first_channel);
+	BUG_ON(!chan_dev->dev);
+	BUG_ON(device_create_file(chan_dev->dev, &chan_attr));
+	rtnl_unlock();
+
+	return orig_len;
+
+free:
+	kfree(chan_dev);
+	rtnl_unlock();
+	return err;
+}
+
+static ssize_t show_hdlc_chan(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	int ret;
+
+	rtnl_lock();	/* was rtnl_unlock(): RTNL would be unlocked twice without being held */
+	ret = print_channels(dev_get_drvdata(dev), buf, CHANNEL_HDLC);
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t set_hdlc_chan(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	u8 channels[MAX_CHANNELS];
+	size_t orig_len = len;
+	unsigned int ch;
+	int err;
+
+	if ((err = parse_channels(&buf, &len, channels)) < 0)
+		return err;
+
+	rtnl_lock();
+	spin_lock_irq(&npe_lock);
+
+	if (port->mode != MODE_RAW && port->mode != MODE_G704) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	for (ch = 0; ch < MAX_CHANNELS; ch++)
+		if (channels[ch] &&
+		    port->channels[ch] != CHANNEL_UNUSED &&
+		    port->channels[ch] != CHANNEL_HDLC) {
+			printk(KERN_DEBUG "Channel #%i already in use\n", ch);
+			err = -EBUSY;
+			goto err;
+		}
+
+	for (ch = 0; ch < MAX_CHANNELS; ch++)
+		if (channels[ch])
+			port->channels[ch] = CHANNEL_HDLC;
+		else if (port->channels[ch] == CHANNEL_HDLC)
+			port->channels[ch] = CHANNEL_UNUSED;
+
+	if (port->hdlc_open)
+		hss_config(port);
+
+	spin_unlock_irq(&npe_lock);
+	rtnl_unlock();
+	return orig_len;
+
+err:
+	spin_unlock_irq(&npe_lock);
+	rtnl_unlock();
+	return err;
+}
+
+static ssize_t show_clock_type(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct port *port = dev_get_drvdata(dev);
+
+	rtnl_lock();
+	strcpy(buf, (port->clock_type & CLOCK_TYPE_MASK) == CLOCK_INT ?
+	       "int\n" : "ext\n");
+	rtnl_unlock();
+	return 5;
+}
+
+static ssize_t set_clock_type(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	unsigned int clk;
+	int ret = len;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	if (len != 3)
+		return -EINVAL;
+	if (!memcmp(buf, "ext", 3))
+		clk = CLOCK_EXT;
+	else if (!memcmp(buf, "int", 3))
+		clk = CLOCK_INT;
+	else
+		return -EINVAL;
+
+	rtnl_lock();
+	spin_lock_irq(&npe_lock);
+	if (port->plat->set_clock)
+		clk = port->plat->set_clock(port->id, clk);
+	if (clk != CLOCK_EXT && clk != CLOCK_INT) {
+		ret = -EINVAL; /* plat->set_clock shouldn't change the state */
+		goto err;
+	}
+	port->clock_type = clk;
+	if (port->port_open_count)
+		hss_config(port);
+err:
+	spin_unlock_irq(&npe_lock);
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t show_clock_rate(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct port *port = dev_get_drvdata(dev);
+
+	rtnl_lock();	/* was inverted (unlock first, lock on exit), leaving RTNL held */
+	sprintf(buf, "%u\n", port->clock_rate);
+	rtnl_unlock();
+	return strlen(buf) + 1;
+}
+
+static ssize_t set_clock_rate(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+#if 0
+	struct port *port = dev_get_drvdata(dev);
+	size_t orig_len = len;
+	unsigned int rate;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	if (get_number(&buf, &len, &rate, 1, 0xFFFFFFFFu))
+		return -EINVAL;
+	if (len)
+		return -EINVAL;
+
+	rtnl_lock();	/* was inverted unlock/lock pair - latent bug once this code is enabled */
+	spin_lock_irq(&npe_lock);
+	port->clock_rate = rate;
+	spin_unlock_irq(&npe_lock);
+	rtnl_unlock();
+	return orig_len;
+#endif
+	return -EINVAL; /* FIXME not yet supported */
+}
+
+static ssize_t show_frame_size(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct port *port = dev_get_drvdata(dev);
+	int ret;
+
+	rtnl_lock();
+	if (port->mode != MODE_RAW)
+		ret = -EINVAL;
+	else {
+		sprintf(buf, "%u\n", port->frame_size);
+		ret = strlen(buf) + 1;
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t set_frame_size(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	size_t ret = len;
+	unsigned int size;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	if (get_number(&buf, &len, &size, MIN_FRAME_SIZE, MAX_FRAME_SIZE))
+		return -EINVAL;
+	if (len || size % 8 > 1)
+		return -EINVAL;
+
+	rtnl_lock();
+	if (port->mode != MODE_RAW)
+		ret = -EINVAL;
+	else if (port->port_open_count)
+		ret = -EBUSY;
+	else {
+		spin_lock_irq(&npe_lock);
+		port->frame_size = size;
+		port->frame_sync_offset = 0;
+		spin_unlock_irq(&npe_lock);
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t show_frame_offset(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct port *port = dev_get_drvdata(dev);
+	int ret;
+
+	rtnl_lock();
+
+	if (port->mode != MODE_RAW && port->mode != MODE_G704)
+		ret = -EINVAL;
+	else {
+		spin_lock_irq(&npe_lock);
+		sprintf(buf, "%u\n", port->frame_sync_offset);
+		spin_unlock_irq(&npe_lock);
+		ret = strlen(buf) + 1;
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t set_frame_offset(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	size_t ret = len;
+	unsigned int offset;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	rtnl_lock();
+	if ((port->mode != MODE_RAW) ||
+	    get_number(&buf, &len, &offset, 0, port->frame_size - 1) || len)
+		ret = -EINVAL;
+	else {
+		spin_lock_irq(&npe_lock);
+		port->frame_sync_offset = offset;
+		if (port->port_open_count) {
+			hss_config_main(port);
+			hss_config_load(port);
+		}
+		spin_unlock_irq(&npe_lock);
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t show_loopback(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct port *port = dev_get_drvdata(dev);
+
+	rtnl_lock();
+	sprintf(buf, "%u\n", port->loopback);
+	rtnl_unlock();
+
+	return strlen(buf) + 1;
+}
+
+static ssize_t set_loopback(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	size_t orig_len = len;
+	unsigned int lb;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	if (get_number(&buf, &len, &lb, 0, 1))
+		return -EINVAL;
+	if (len)
+		return -EINVAL;
+
+	rtnl_lock();
+	spin_lock_irq(&npe_lock);
+
+	if (port->loopback != lb) {
+		port->loopback = lb;
+		if (port->port_open_count)
+			hss_config(port);
+
+		__hss_hdlc_set_carrier(port);
+	}
+
+	spin_unlock_irq(&npe_lock);
+	rtnl_unlock();
+	return orig_len;
+}
+
+static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct port *port = dev_get_drvdata(dev);
+
+	rtnl_lock();
+	switch(port->mode) {
+	case MODE_RAW:
+		strcpy(buf, "raw\n");
+		break;
+	case MODE_G704:
+		strcpy(buf, "g704\n");
+		break;
+	default:
+		strcpy(buf, "hdlc\n");
+		break;
+	}
+	rtnl_unlock();
+
+	return strlen(buf) + 1;
+}
+
+static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct port *port = dev_get_drvdata(dev);
+	size_t ret = len;
+
+	if (len && buf[len - 1] == '\n')
+		len--;
+
+	rtnl_lock();
+
+	if (port->port_open_count)
+		ret = -EBUSY;
+	else if (len == 4 && !memcmp(buf, "hdlc", 4))
+		port->mode = MODE_HDLC;
+	else if (len == 3 && !memcmp(buf, "raw", 3))
+		port->mode = MODE_RAW;
+	else if (len == 4 && !memcmp(buf, "g704", 4)) {
+		port->mode = MODE_G704;
+		port->frame_size = 256;
+		port->frame_sync_offset = 0;
+	} else
+		ret = -EINVAL;
+
+	rtnl_unlock();
+	return ret;
+}
+
+static struct device_attribute hss_attrs[] = {
+	__ATTR(create_chan, 0200, NULL, create_chan),
+	__ATTR(hdlc_chan, 0644, show_hdlc_chan, set_hdlc_chan),
+	__ATTR(clock_type, 0644, show_clock_type, set_clock_type),
+	__ATTR(clock_rate, 0644, show_clock_rate, set_clock_rate),
+	__ATTR(frame_size, 0644, show_frame_size, set_frame_size),
+	__ATTR(frame_offset, 0644, show_frame_offset, set_frame_offset),
+	__ATTR(loopback, 0644, show_loopback, set_loopback),
+	__ATTR(mode, 0644, show_mode, set_mode),
+};
+
+/*****************************************************************************
  * initialization
  ****************************************************************************/
 
@@ -1330,7 +2880,7 @@
 	struct port *port;
 	struct net_device *dev;
 	hdlc_device *hdlc;
-	int err;
+	int i, err;
 
 	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
 		return -ENOMEM;
@@ -1354,16 +2904,23 @@
 	port->clock_type = CLOCK_EXT;
 	port->clock_rate = 0;
 	port->clock_reg = CLK42X_SPEED_2048KHZ;
+	port->frame_size = 256; /* E1 */
 	port->id = pdev->id;
 	port->dev = &pdev->dev;
 	port->plat = pdev->dev.platform_data;
+	memset(port->channels, CHANNEL_UNUSED, sizeof(port->channels));
+	init_waitqueue_head(&port->chan_tx_waitq);
+	init_waitqueue_head(&port->chan_rx_waitq);
 	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
 
-	if ((err = register_hdlc_device(dev)))
+	if ((err = register_hdlc_device(dev))) /* HDLC mode by default */
 		goto err_free_netdev;
 
 	platform_set_drvdata(pdev, port);
 
+	for (i = 0; i < ARRAY_SIZE(hss_attrs); i++)
+		BUG_ON(device_create_file(port->dev, &hss_attrs[i]));
+
 	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
 	return 0;
 
@@ -1379,6 +2936,16 @@
 static int __devexit hss_remove_one(struct platform_device *pdev)
 {
 	struct port *port = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hss_attrs); i++)
+		device_remove_file(port->dev, &hss_attrs[i]);
+
+	for (i = 0; i < MAX_CHAN_DEVICES; i++)
+		if (port->chan_devices[i]) {
+			device_unregister(port->chan_devices[i]->dev);
+			cdev_del(&port->chan_devices[i]->cdev);
+		}
 
 	unregister_hdlc_device(port->netdev);
 	free_netdev(port->netdev);
@@ -1396,19 +2963,45 @@
 
 static int __init hss_init_module(void)
 {
+	int err;
+	dev_t rdev;
+
 	if ((ixp4xx_read_feature_bits() &
 	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
 	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
 		return -ENODEV;
 
+	if ((err = alloc_chrdev_region(&rdev, 0, HSS_COUNT * MAX_CHAN_DEVICES,
+				       "hss")))
+		return err;
+
 	spin_lock_init(&npe_lock);
 
-	return platform_driver_register(&ixp4xx_hss_driver);
+	if (IS_ERR(hss_class = class_create(THIS_MODULE, "hss"))) {
+		printk(KERN_ERR "Can't register device class 'hss'\n");
+		err = PTR_ERR(hss_class);
+		goto free_chrdev;
+	}
+	if ((err = platform_driver_register(&ixp4xx_hss_driver)))
+		goto destroy_class;
+
+	chan_major = MAJOR(rdev);
+	return 0;
+
+destroy_class:
+	class_destroy(hss_class);
+free_chrdev:
+	unregister_chrdev_region(MKDEV(chan_major, 0),
+				 HSS_COUNT * MAX_CHAN_DEVICES);
+	return err;
 }
 
 static void __exit hss_cleanup_module(void)
 {
 	platform_driver_unregister(&ixp4xx_hss_driver);
+	class_destroy(hss_class);
+	unregister_chrdev_region(MKDEV(chan_major, 0),
+				 HSS_COUNT * MAX_CHAN_DEVICES);
 }
 
 MODULE_AUTHOR("Krzysztof Halasa");
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d1b2306..b0d0e02 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -22,6 +22,19 @@
 	  Say Y, if you want to debug atheros wireless drivers.
 	  Right now only ath9k makes use of this.
 
+config ATH_DEFAULT_COUNTRY
+	int "Default country code for Atheros cards"
+	range 1 8191
+	default "840"
+	---help---
+	  This allows you to specify a default country code for cards which
+	  don't specify their country code and which indicate that the default
+	  country should be used.
+	  It has no effect on cards which _do_ specify the country code or
+	  a (non-zero) regulatory domain.
+
+	  If building for use in the USA, say "840".
+
 source "drivers/net/wireless/ath/ath5k/Kconfig"
 source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 126a4ea..62f77f3 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -278,9 +278,10 @@
 		}
 
 		/* Enable sleep clock operation */
+#if 0
 		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
 				AR5K_PCICFG_SLEEP_CLOCK_EN);
-
+#endif
 	} else {
 
 		/* Disable sleep clock operation and
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 028310f..366bf48 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -533,7 +533,7 @@
 	    regdmn == CTRY_DEFAULT) {
 		printk(KERN_DEBUG "ath: EEPROM indicates default "
 		       "country code should be used\n");
-		reg->country_code = CTRY_UNITED_STATES;
+		reg->country_code = CONFIG_ATH_DEFAULT_COUNTRY;
 	}
 
 	if (reg->country_code == CTRY_DEFAULT) {
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index bc90b09..785ef97 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -34,8 +34,11 @@
 	struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
 
 	if (rtc == NULL) {
+#if 0
+		/* don't print this, many routers have no RTC */
 		pr_err("%s: unable to open rtc device (%s)\n",
 			__FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+#endif
 		goto err_open;
 	}
 
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 989e16e..ce63085 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -329,9 +329,16 @@
 
 	/* OAM F5 end-to-end */
 	if (pti == ATM_PTI_E2EF5) {
-		if (printk_ratelimit())
+		if (printk_ratelimit()) {
+			char buffer[ATM_CELL_SIZE * 3 + 1];
+			int i;
 			atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
 				__func__, vpi, vci);
+
+			for (i = 0; i < ATM_CELL_SIZE; i++)
+				sprintf(buffer + i * 3, " %02X", source[i]);
+			atm_warn(instance, "%s:%s\n", __func__, buffer);
+		}
 		atomic_inc(&vcc->stats->rx_err);
 		return;
 	}
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index e02c0ec..f0ef660 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -175,12 +175,14 @@
 {
 	int ret;
 
-	if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
+#ifdef CONFIG_IXP4XX_SUPPORT_425A0
+	if (cpu_is_ixp42x_rev_a0()) {
 		printk(KERN_ERR "IXP4XXX Watchdog: Rev. A0 IXP42x CPU detected"
 			" - watchdog disabled\n");
 
 		return -ENODEV;
 	}
+#endif
 	spin_lock_init(&wdt_lock);
 	boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
 			WDIOF_CARDRESET : 0;
diff --git a/include/linux/hdlc/ioctl.h b/include/linux/hdlc/ioctl.h
index 5839723..89bffef 100644
--- a/include/linux/hdlc/ioctl.h
+++ b/include/linux/hdlc/ioctl.h
@@ -4,11 +4,14 @@
 
 #define GENERIC_HDLC_VERSION 4	/* For synchronization with sethdlc utility */
 
-#define CLOCK_DEFAULT   0	/* Default setting */
-#define CLOCK_EXT	1	/* External TX and RX clock - DTE */
-#define CLOCK_INT	2	/* Internal TX and RX clock - DCE */
-#define CLOCK_TXINT	3	/* Internal TX and external RX clock */
-#define CLOCK_TXFROMRX	4	/* TX clock derived from external RX clock */
+#define CLOCK_DEFAULT		0	/* Default setting */
+#define CLOCK_EXT		1	/* External TX and RX clock - DTE */
+#define CLOCK_INT		2	/* Internal TX and RX clock - DCE */
+#define CLOCK_TXINT		3	/* Internal TX and external RX clock */
+#define CLOCK_TXFROMRX		4	/* TX clock derived from external RX clock */
+#define CLOCK_TYPE_MASK		0xFF
+#define CLOCK_RX_INVERTED	0x40000000
+#define CLOCK_TX_INVERTED	0x80000000
 
 
 #define ENCODING_DEFAULT	0 /* Default setting */
@@ -34,13 +37,13 @@
 #define LMI_CCITT		3 /* ITU-T Annex A */
 #define LMI_CISCO		4 /* The "original" LMI, aka Gang of Four */
 
-typedef struct { 
+typedef struct {
 	unsigned int clock_rate; /* bits per second */
 	unsigned int clock_type; /* internal, external, TX-internal etc. */
 	unsigned short loopback;
 } sync_serial_settings;          /* V.35, V.24, X.21 */
 
-typedef struct { 
+typedef struct {
 	unsigned int clock_rate; /* bits per second */
 	unsigned int clock_type; /* internal, external, TX-internal etc. */
 	unsigned short loopback;
diff --git a/include/linux/imq.h b/include/linux/imq.h
new file mode 100644
index 0000000..1babb09
--- /dev/null
+++ b/include/linux/imq.h
@@ -0,0 +1,13 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
+/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
+#define IMQ_F_BITS	5
+
+#define IMQ_F_IFMASK	0x0f
+#define IMQ_F_ENQUEUE	0x10
+
+#define IMQ_MAX_DEVS	(IMQ_F_IFMASK + 1)
+
+#endif /* _IMQ_H */
+
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 857f502..61e55aa 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -22,7 +22,8 @@
 #define NF_QUEUE 3
 #define NF_REPEAT 4
 #define NF_STOP 5
-#define NF_MAX_VERDICT NF_STOP
+#define NF_IMQ_QUEUE 6
+#define NF_MAX_VERDICT NF_IMQ_QUEUE
 
 /* we overload the higher bits for encoding auxiliary data such as the queue
  * number or errno values. Not nice, but better than additional function
diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
new file mode 100644
index 0000000..9b07230
--- /dev/null
+++ b/include/linux/netfilter/xt_IMQ.h
@@ -0,0 +1,9 @@
+#ifndef _XT_IMQ_H
+#define _XT_IMQ_H
+
+struct xt_imq_info {
+	unsigned int todev;     /* target imq device */
+};
+
+#endif /* _XT_IMQ_H */
+
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index f9930c8..a89284d 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -2,6 +2,7 @@
 header-y += ip_tables.h
 header-y += ipt_CLUSTERIP.h
 header-y += ipt_ECN.h
+header-y += ipt_IMQ.h
 header-y += ipt_LOG.h
 header-y += ipt_REJECT.h
 header-y += ipt_SAME.h
diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
new file mode 100644
index 0000000..7af320f
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
@@ -0,0 +1,10 @@
+#ifndef _IPT_IMQ_H
+#define _IPT_IMQ_H
+
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+
+#define ipt_imq_info xt_imq_info
+
+#endif /* _IPT_IMQ_H */
+
diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild
index bd095bc..821f558 100644
--- a/include/linux/netfilter_ipv6/Kbuild
+++ b/include/linux/netfilter_ipv6/Kbuild
@@ -1,5 +1,6 @@
 header-y += ip6_tables.h
 header-y += ip6t_HL.h
+header-y += ip6t_IMQ.h
 header-y += ip6t_LOG.h
 header-y += ip6t_REJECT.h
 header-y += ip6t_ah.h
diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
new file mode 100644
index 0000000..198ac01
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
@@ -0,0 +1,10 @@
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_IMQ.h>
+
+#define ip6t_imq_info xt_imq_info
+
+#endif /* _IP6T_IMQ_H */
+
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c0a4f3a..85a6588 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -29,6 +29,9 @@
 #include <linux/rcupdate.h>
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -339,6 +342,9 @@
 	 * first. This is owned by whoever has the skb queued ATM.
 	 */
 	char			cb[48] __aligned(8);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	void			*cb_next;
+#endif
 
 	unsigned long		_skb_refdst;
 #ifdef CONFIG_XFRM
@@ -377,6 +383,9 @@
 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	struct sk_buff		*nfct_reasm;
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	struct nf_queue_entry	*nf_queue_entry;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
@@ -401,6 +410,10 @@
 
 	/* 0/13 bit hole */
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	__u8			imq_flags:IMQ_F_BITS;
+#endif
+
 #ifdef CONFIG_NET_DMA
 	dma_cookie_t		dma_cookie;
 #endif
@@ -487,6 +500,12 @@
 	return (struct rtable *)skb_dst(skb);
 }
 
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+extern int skb_save_cb(struct sk_buff *skb);
+extern int skb_restore_cb(struct sk_buff *skb);
+#endif
+
 extern void kfree_skb(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
@@ -2134,6 +2153,10 @@
 	dst->nfct_reasm = src->nfct_reasm;
 	nf_conntrack_get_reasm(src->nfct_reasm);
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	dst->imq_flags = src->imq_flags;
+	dst->nf_queue_entry = src->nf_queue_entry;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	dst->nf_bridge  = src->nf_bridge;
 	nf_bridge_get(src->nf_bridge);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 252fd10..b1c2483 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -30,5 +30,11 @@
 				       const struct nf_queue_handler *qh);
 extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
 extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
+extern void nf_unregister_queue_imq_handler(void);
+#endif
 
 #endif /* _NF_QUEUE_H */
diff --git a/mm/slab.c b/mm/slab.c
index d96e223..15b032f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -710,6 +710,10 @@
 #ifdef CONFIG_ZONE_DMA
 	if (unlikely(gfpflags & GFP_DMA))
 		return csizep->cs_dmacachep;
+#ifdef CONFIG_ZONE_DMA_ALL_KERNEL
+	if (likely((gfpflags & GFP_USER) != GFP_USER))
+		return csizep->cs_dmacachep;
+#endif
 #endif
 	return csizep->cs_cachep;
 }
@@ -2304,6 +2308,7 @@
 		gfp = GFP_NOWAIT;
 
 	/* Get cache's description obj. */
+	/* FIXME seems here */
 	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
@@ -2738,6 +2743,10 @@
 	if (CONFIG_ZONE_DMA_FLAG) {
 		if (flags & GFP_DMA)
 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
+#ifdef CONFIG_ZONE_DMA_ALL_KERNEL
+		else if ((flags & GFP_USER) != GFP_USER)
+			;
+#endif
 		else
 			BUG_ON(cachep->gfpflags & GFP_DMA);
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 9c58c1e..c02a746 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -98,6 +98,9 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
@@ -2108,12 +2111,21 @@
 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
 			skb_dst_drop(skb);
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+		if (!list_empty(&ptype_all) &&
+					!(skb->imq_flags & IMQ_F_ENQUEUE))
+#else
 		if (!list_empty(&ptype_all))
+#endif
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_orphan_try(skb);
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+		features = skb->dev ? netif_skb_features(skb) : dev->features;
+#else
 		features = netif_skb_features(skb);
+#endif
 
 		if (vlan_tx_tag_present(skb) &&
 		    !(features & NETIF_F_HW_VLAN_TX)) {
@@ -2280,8 +2292,7 @@
 #endif
 }
 
-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
-					struct sk_buff *skb)
+static struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	int queue_index;
 	const struct net_device_ops *ops = dev->netdev_ops;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46cbd28..f264b6f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,6 +73,9 @@
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
+#endif
 
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
@@ -92,6 +95,82 @@
 	return 1;
 }
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+/* Control buffer save/restore for IMQ devices */
+struct skb_cb_table {
+	char			cb[48] __aligned(8);
+	void			*cb_next;
+	atomic_t		refcnt;
+};
+
+static DEFINE_SPINLOCK(skb_cb_store_lock);
+
+int skb_save_cb(struct sk_buff *skb)
+{
+	struct skb_cb_table *next;
+
+	next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
+	if (!next)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
+
+	memcpy(next->cb, skb->cb, sizeof(skb->cb));
+	next->cb_next = skb->cb_next;
+
+	atomic_set(&next->refcnt, 1);
+
+	skb->cb_next = next;
+	return 0;
+}
+EXPORT_SYMBOL(skb_save_cb);
+
+int skb_restore_cb(struct sk_buff *skb)
+{
+	struct skb_cb_table *next;
+
+	if (!skb->cb_next)
+		return 0;
+
+	next = skb->cb_next;
+
+	BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
+
+	memcpy(skb->cb, next->cb, sizeof(skb->cb));
+	skb->cb_next = next->cb_next;
+
+	spin_lock(&skb_cb_store_lock);
+
+	if (atomic_dec_and_test(&next->refcnt))
+		kmem_cache_free(skbuff_cb_store_cache, next);
+
+	spin_unlock(&skb_cb_store_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(skb_restore_cb);
+
+static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
+{
+	struct skb_cb_table *next;
+	struct sk_buff *old;
+
+	if (!__old->cb_next) {
+		new->cb_next = NULL;
+		return;
+	}
+
+	spin_lock(&skb_cb_store_lock);
+
+	old = (struct sk_buff *)__old;
+
+	next = old->cb_next;
+	atomic_inc(&next->refcnt);
+	new->cb_next = next;
+
+	spin_unlock(&skb_cb_store_lock);
+}
+#endif
 
 /* Pipe buffer operations for a socket. */
 static const struct pipe_buf_operations sock_pipe_buf_ops = {
@@ -380,6 +459,26 @@
 		WARN_ON(in_irq());
 		skb->destructor(skb);
 	}
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	/* This should not happen. When it does, avoid a memory leak by
+	 * restoring the chain of cb backups. */
+	while (skb->cb_next != NULL) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "IMQ: kfree_skb: skb->cb_next: "
+				"%p\n", skb->cb_next);
+
+		skb_restore_cb(skb);
+	}
+	/* This should not happen either, nf_queue_entry is nullified in
+	 * imq_dev_xmit(). If we have non-NULL nf_queue_entry then we are
+	 * leaking entry pointers, maybe memory. We don't know if this is
+	 * pointer to already freed memory, or should this be freed.
+	 * If this happens we need to add refcounting, etc for nf_queue_entry.
+	 */
+	if (skb->nf_queue_entry && net_ratelimit())
+		printk(KERN_WARNING
+				"IMQ: kfree_skb: skb->nf_queue_entry != NULL\n");
+#endif
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
 #endif
@@ -518,6 +617,9 @@
 	new->sp			= secpath_get(old->sp);
 #endif
 	memcpy(new->cb, old->cb, sizeof(old->cb));
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	skb_copy_stored_cb(new, old);
+#endif
 	new->csum		= old->csum;
 	new->local_df		= old->local_df;
 	new->pkt_type		= old->pkt_type;
@@ -2781,6 +2883,13 @@
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
+						  sizeof(struct skb_cb_table),
+						  0,
+						  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						  NULL);
+#endif
 }
 
 /**
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 32bff6d..95d5b4e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -507,6 +507,18 @@
 	  For more information on the LEDs available on your system, see
 	  Documentation/leds-class.txt
 
+config NETFILTER_XT_TARGET_IMQ
+	tristate '"IMQ" target support'
+	depends on NETFILTER_XTABLES
+	depends on IP_NF_MANGLE || IP6_NF_MANGLE
+	select IMQ
+	default m if NETFILTER_ADVANCED=n
+	help
+	  This option adds an `IMQ' target which is used to specify if and
+	  to which imq device packets should get enqueued/dequeued.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_MARK
 	tristate '"MARK" target support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853..2168bdb 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 899b71c..924cf80 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -191,6 +191,20 @@
 			kfree_skb(skb);
 		}
 		ret = 0;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	} else if ((verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
+		ret = nf_imq_queue(skb, elem, pf, hook, indev, outdev, okfn,
+			       verdict >> NF_VERDICT_QBITS);
+		if (ret < 0) {
+			if (ret == -ECANCELED)
+				goto next_hook;
+			if (ret == -ESRCH &&
+			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+				goto next_hook;
+			kfree_skb(skb);
+		}
+		ret = 0;
+#endif
 	}
 	rcu_read_unlock();
 	return ret;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 770f764..903ea39 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -30,6 +30,15 @@
 		    struct net_device *outdev,
 		    int (*okfn)(struct sk_buff *),
 		    unsigned int queuenum);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+extern int nf_imq_queue(struct sk_buff *skb,
+		    struct list_head *elem,
+		    u_int8_t pf, unsigned int hook,
+		    struct net_device *indev,
+		    struct net_device *outdev,
+		    int (*okfn)(struct sk_buff *),
+		    unsigned int queuenum);
+#endif
 extern int __init netfilter_queue_init(void);
 
 /* nf_log.c */
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5b466cd..75ff78b 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -22,6 +22,26 @@
 
 static DEFINE_MUTEX(queue_handler_mutex);
 
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+static const struct nf_queue_handler *queue_imq_handler;
+
+void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
+{
+	mutex_lock(&queue_handler_mutex);
+	rcu_assign_pointer(queue_imq_handler, qh);
+	mutex_unlock(&queue_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
+
+void nf_unregister_queue_imq_handler(void)
+{
+	mutex_lock(&queue_handler_mutex);
+	rcu_assign_pointer(queue_imq_handler, NULL);
+	mutex_unlock(&queue_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
+#endif
+
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
@@ -92,7 +112,7 @@
 }
 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
 
-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
 {
 	/* Release those devices we held, or Alexey will kill me. */
 	if (entry->indev)
@@ -112,6 +132,7 @@
 	/* Drop reference to owner of hook which queued us. */
 	module_put(entry->elem->owner);
 }
+EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
 
 /*
  * Any packet that leaves via this function must come back
@@ -123,7 +144,8 @@
 		      struct net_device *indev,
 		      struct net_device *outdev,
 		      int (*okfn)(struct sk_buff *),
-		      unsigned int queuenum)
+		      unsigned int queuenum,
+		      bool imq_queue)
 {
 	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
@@ -137,7 +159,14 @@
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	rcu_read_lock();
 
-	qh = rcu_dereference(queue_handler[pf]);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	if (imq_queue)
+		qh = rcu_dereference(queue_imq_handler);
+	else
+		qh = rcu_dereference(queue_handler[pf]);
+#else
+	qh = rcu_dereference(queue_handler[pf]);
+#endif
 	if (!qh) {
 		status = -ESRCH;
 		goto err_unlock;
@@ -203,13 +232,14 @@
 	return status;
 }
 
-int nf_queue(struct sk_buff *skb,
-	     struct list_head *elem,
-	     u_int8_t pf, unsigned int hook,
-	     struct net_device *indev,
-	     struct net_device *outdev,
-	     int (*okfn)(struct sk_buff *),
-	     unsigned int queuenum)
+static int _nf_queue(struct sk_buff *skb,
+		     struct list_head *elem,
+		     u_int8_t pf, unsigned int hook,
+		     struct net_device *indev,
+		     struct net_device *outdev,
+		     int (*okfn)(struct sk_buff *),
+		     unsigned int queuenum,
+		     bool imq_queue)
 {
 	struct sk_buff *segs;
 	int err;
@@ -217,7 +247,7 @@
 
 	if (!skb_is_gso(skb))
 		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-				  queuenum);
+				  queuenum, imq_queue);
 
 	switch (pf) {
 	case NFPROTO_IPV4:
@@ -244,7 +274,7 @@
 		segs->next = NULL;
 		if (err == 0)
 			err = __nf_queue(segs, elem, pf, hook, indev,
-					   outdev, okfn, queuenum);
+					 outdev, okfn, queuenum, imq_queue);
 		if (err == 0)
 			queued++;
 		else
@@ -260,6 +290,32 @@
 	return err;
 }
 
+int nf_queue(struct sk_buff *skb,
+	     struct list_head *elem,
+	     u_int8_t pf, unsigned int hook,
+	     struct net_device *indev,
+	     struct net_device *outdev,
+	     int (*okfn)(struct sk_buff *),
+	     unsigned int queuenum)
+{
+	return _nf_queue(skb, elem, pf, hook, indev, outdev, okfn, queuenum,
+			 false);
+}
+
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+int nf_imq_queue(struct sk_buff *skb,
+	     struct list_head *elem,
+	     u_int8_t pf, unsigned int hook,
+	     struct net_device *indev,
+	     struct net_device *outdev,
+	     int (*okfn)(struct sk_buff *),
+	     unsigned int queuenum)
+{
+	return _nf_queue(skb, elem, pf, hook, indev, outdev, okfn, queuenum,
+			 true);
+}
+#endif
+
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
 	struct sk_buff *skb = entry->skb;
@@ -301,7 +357,7 @@
 	case NF_QUEUE:
 		err = __nf_queue(skb, elem, entry->pf, entry->hook,
 				 entry->indev, entry->outdev, entry->okfn,
-				 verdict >> NF_VERDICT_QBITS);
+				 verdict >> NF_VERDICT_QBITS, false);
 		if (err < 0) {
 			if (err == -ECANCELED)
 				goto next_hook;
@@ -311,6 +367,21 @@
 			kfree_skb(skb);
 		}
 		break;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	case NF_IMQ_QUEUE:
+		err = __nf_queue(skb, elem, entry->pf, entry->hook,
+				 entry->indev, entry->outdev, entry->okfn,
+				 verdict >> NF_VERDICT_QBITS, true);
+		if (err < 0) {
+			if (err == -ECANCELED)
+				goto next_hook;
+			if (err == -ESRCH &&
+			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+				goto next_hook;
+			kfree_skb(skb);
+		}
+		break;
+#endif
 	case NF_STOLEN:
 	default:
 		kfree_skb(skb);
diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
new file mode 100644
index 0000000..2340cb4
--- /dev/null
+++ b/net/netfilter/xt_IMQ.c
@@ -0,0 +1,74 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff *pskb,
+				const struct xt_action_param *par)
+{
+	const struct xt_imq_info *mr = par->targinfo;
+
+	pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
+
+	return XT_CONTINUE;
+}
+
+static int imq_checkentry(const struct xt_tgchk_param *par)
+{
+	struct xt_imq_info *mr = par->targinfo;
+
+	if (mr->todev > IMQ_MAX_DEVS - 1) {
+		printk(KERN_WARNING
+		       "IMQ: invalid device specified, highest is %u\n",
+		       IMQ_MAX_DEVS - 1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct xt_target xt_imq_reg[] __read_mostly = {
+	{
+		.name           = "IMQ",
+		.family		= AF_INET,
+		.checkentry     = imq_checkentry,
+		.target         = imq_target,
+		.targetsize	= sizeof(struct xt_imq_info),
+		.table		= "mangle",
+		.me             = THIS_MODULE
+	},
+	{
+		.name           = "IMQ",
+		.family		= AF_INET6,
+		.checkentry     = imq_checkentry,
+		.target         = imq_target,
+		.targetsize	= sizeof(struct xt_imq_info),
+		.table		= "mangle",
+		.me             = THIS_MODULE
+	},
+};
+
+static int __init imq_init(void)
+{
+	return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
+static void __exit imq_fini(void)
+{
+	xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
+module_init(imq_init);
+module_exit(imq_fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. "
+		   "See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_IMQ");
+MODULE_ALIAS("ip6t_IMQ");
+