Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
Pull Ceph fix from Sage Weil:
"This drops a bad assert that a few users have been hitting but we've
only recently been able to track down"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
rbd: drop an unsafe assertion
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e6b72d3..68c0f51 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -124,12 +124,11 @@
Updating on-disk metadata
-------------------------
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written. If no such requests are made then commits will occur every
-second. This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target). If
-power is lost you may lose some recent writes. The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the cache behaves like a physical disk that has a volatile write
+cache. If power is lost you may lose some recent writes. The metadata
+should always be consistent in spite of any crash.
The 'dirty' state for a cache block changes far too frequently for us
to keep updating it on the fly. So we treat it as a hint. In normal
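
The commit behaviour described above (and repeated for the thin-provisioning target further down) can be exercised from userspace: any fsync()/fdatasync() that reaches the mapped device as a FLUSH or FUA request forces an immediate metadata commit instead of waiting for the periodic one. The following is only an illustrative sketch, not part of the patch; the mount point and file name are hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    const char buf[] = "example payload\n";
    /* Hypothetical file on a filesystem backed by a dm-cache or dm-thin device. */
    int fd = open("/mnt/pool/example.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (write(fd, buf, sizeof(buf) - 1) != (ssize_t)(sizeof(buf) - 1))
        perror("write");

    /*
     * fdatasync() makes the filesystem send FLUSH/FUA requests down the
     * stack, so the target commits its on-disk metadata now rather than
     * waiting for the once-per-second commit.
     */
    if (fdatasync(fd) < 0)
        perror("fdatasync");

    close(fd);
    return 0;
}

Applications that need their writes durable at a known point should rely on such explicit flushes rather than on the one-second commit timer.
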
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 8a7a3d4..05a27e9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -116,6 +116,35 @@
userspace daemon can use this to detect a situation where a new table
already exceeds the threshold.
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache. If power is lost you may lose some recent
+writes. The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space). If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation. Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed. It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted. Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
Thin provisioning
-----------------
@@ -258,10 +287,9 @@
should register for the event and then check the target's status.
held metadata root:
- The location, in sectors, of the metadata root that has been
+ The location, in blocks, of the metadata root that has been
'held' for userspace read access. '-' indicates there is no
- held root. This feature is not yet implemented so '-' is
- always returned.
+ held root.
discard_passdown|no_discard_passdown
Whether or not discards are actually being passed down to the
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index a6a352c..5992dce 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -21,9 +21,9 @@
must appear in the same order as the output clocks.
- #clock-cells: Must be 1
- clock-output-names: The name of the clocks as free-form strings
- - renesas,indices: Indices of the gate clocks into the group (0 to 31)
+ - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)
-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
gate clocks must not be declared.
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c..4fc3927 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -7,3 +7,4 @@
Optional properties:
- local-mac-address : Ethernet mac address to use
+- vdd-supply: supply for Ethernet mac
diff --git a/Documentation/devicetree/bindings/net/opencores-ethoc.txt b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
new file mode 100644
index 0000000..2dc127c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+ first region is for the device registers and descriptor rings,
+ second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+ enet0: ethoc@fd030000 {
+ compatible = "opencores,ethoc";
+ reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+ interrupts = <1>;
+ local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
similarity index 98%
rename from Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
rename to Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
index 9e9e9ef..c119deb 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller
This is a pin controller for the Broadcom BCM281xx SoC family, which includes
BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@
Required Properties:
-- compatible: Must be "brcm,capri-pinctrl".
+- compatible: Must be "brcm,bcm11351-pinctrl"
- reg: Base address of the PAD Controller register block and the size
of the block.
For example, the following is the bare minimum node:
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
@@ -119,7 +119,7 @@
Example:
// pin controller node
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
// pin configuration node
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index f3089d4..0cbe6ec 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -554,12 +554,6 @@
not specified in the struct can_frame and therefore it is only valid in
CANFD_MTU sized CAN FD frames.
- As long as the payload length is <=8 the received CAN frames from CAN FD
- capable CAN devices can be received and read by legacy sockets too. When
- user-generated CAN FD frames have a payload length <=8 these can be send
- by legacy CAN network interfaces too. Sending CAN FD frames with payload
- length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
Implementation hint for new CAN applications:
To build a CAN FD aware application use struct canfd_frame as basic CAN
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index b261229..c6af4ba 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -226,9 +226,9 @@
void *rx_ring, *tx_ring;
/* Configure ring parameters */
- if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
exit(1);
- if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
exit(1)
/* Calculate size of each individual ring */
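
For context, here is a minimal sketch of the corrected ring setup with both options issued at the SOL_NETLINK level, matching the hunk above. The ring geometry, the NETLINK_GENERIC protocol and the SOL_NETLINK fallback define are illustrative assumptions, and the calls only succeed on kernels built with the (optional, since removed) NETLINK_MMAP support.

#include <stdlib.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270        /* value from the kernel's linux/socket.h */
#endif

int main(void)
{
    /* Illustrative ring geometry: 64 blocks of 4 KiB, 2 KiB frames. */
    struct nl_mmap_req req = {
        .nm_block_size = 4096,
        .nm_block_nr   = 64,
        .nm_frame_size = 2048,
        .nm_frame_nr   = 64 * (4096 / 2048),
    };
    unsigned int ring_size = req.nm_block_nr * req.nm_block_size;
    void *rx_ring, *tx_ring;
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);

    if (fd < 0)
        exit(1);

    /* Both ring options live at the SOL_NETLINK level. */
    if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
        exit(1);
    if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
        exit(1);

    /* One mapping covers both rings; the TX ring follows the RX ring. */
    rx_ring = mmap(NULL, 2 * ring_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, fd, 0);
    if (rx_ring == MAP_FAILED)
        exit(1);
    tx_ring = (char *)rx_ring + ring_size;

    (void)tx_ring;             /* ring frames would be walked here */
    close(fd);
    return 0;
}
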
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1404674..6fea79e 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -453,7 +453,7 @@
enabled previously with setsockopt() and
the PACKET_COPY_THRESH option.
- The number of frames than can be buffered to
+ The number of frames that can be buffered to
be read with recvfrom is limited like a normal socket.
See the SO_RCVBUF option in the socket (7) man page.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 661d3c3..048c92b 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -21,26 +21,38 @@
SO_TIMESTAMPING:
-Instructs the socket layer which kind of information is wanted. The
-parameter is an integer with some of the following bits set. Setting
-other bits is an error and doesn't change the current state.
+Instructs the socket layer which kind of information should be collected
+and/or reported. The parameter is an integer with some of the following
+bits set. Setting other bits is an error and doesn't change the current
+state.
-SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamp in hardware
-SOF_TIMESTAMPING_TX_SOFTWARE: if SOF_TIMESTAMPING_TX_HARDWARE is off or
- fails, then do it in software
-SOF_TIMESTAMPING_RX_HARDWARE: return the original, unmodified time stamp
- as generated by the hardware
-SOF_TIMESTAMPING_RX_SOFTWARE: if SOF_TIMESTAMPING_RX_HARDWARE is off or
- fails, then do it in software
-SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp
-SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to
- the system time base
-SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
- software
+Four of the bits are requests to the stack to try to generate
+timestamps. Any combination of them is valid.
-SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
-SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
-following control message:
+SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware
+SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software
+SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware
+SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software
+
+The other three bits control which timestamps will be reported in a
+generated control message. If none of these bits are set or if none of
+the set bits correspond to data that is available, then the control
+message will not be generated:
+
+SOF_TIMESTAMPING_SOFTWARE: report systime if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
+
+It is worth noting that timestamps may be collected for reasons other
+than being requested by a particular socket with
+SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that
+can generate hardware receive timestamps ignore
+SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag
+in case future drivers pay attention.
+
+If timestamps are reported, they will appear in a control message with
+cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like
+this:
struct scm_timestamping {
struct timespec systime;
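
A minimal sketch of the semantics described above: one generation bit (SOF_TIMESTAMPING_RX_SOFTWARE) asks the stack to create receive timestamps, one reporting bit (SOF_TIMESTAMPING_SOFTWARE) asks for systime in the control message, and the payload of that message is the three timespecs listed in the struct excerpt, in that order. The port number is arbitrary, and the SCM_TIMESTAMPING fallback define is only a guard for userspace headers that do not provide it.

#include <stdio.h>
#include <time.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>        /* SOF_TIMESTAMPING_* flags */

#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif

int main(void)
{
    /* One generation bit plus one reporting bit. */
    int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
    struct sockaddr_in addr = {
        .sin_family = AF_INET,
        .sin_port   = htons(5555),               /* arbitrary port */
        .sin_addr   = { .s_addr = htonl(INADDR_ANY) },
    };
    char data[2048];
    union {
        char buf[512];
        struct cmsghdr align;
    } control;
    struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = control.buf, .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
        return 1;
    if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
        return 1;

    if (recvmsg(fd, &msg, 0) < 0)                /* waits for one datagram */
        return 1;

    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SCM_TIMESTAMPING) {
            /* Payload: systime, hwtimetrans, hwtimeraw, in that order. */
            struct timespec *ts = (struct timespec *)CMSG_DATA(cmsg);

            printf("systime %ld.%09ld\n",
                   (long)ts[0].tv_sec, (long)ts[0].tv_nsec);
        }
    }
    return 0;
}
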
diff --git a/MAINTAINERS b/MAINTAINERS
index c6d0e93..a80c84e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -73,7 +73,8 @@
L: Mailing list that is relevant to this area
W: Web-page with status/info
Q: Patchwork web based patch tracking system site
- T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
+ T: SCM tree type and location.
+ Type is one of: git, hg, quilt, stgit, topgit
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -473,7 +474,7 @@
AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
S: Maintained
F: drivers/char/agp/
F: include/linux/agp*
@@ -910,11 +911,11 @@
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
-M: Shawn Guo <shawn.guo@linaro.org>
+M: Shawn Guo <shawn.guo@freescale.com>
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/mach-imx/
F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
@@ -1612,11 +1613,11 @@
F: drivers/net/wireless/atmel*
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
-M: Bradley Grove <linuxdrivers@attotech.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.attotech.com
-S: Supported
-F: drivers/scsi/esas2r
+M: Bradley Grove <linuxdrivers@attotech.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.attotech.com
+S: Supported
+F: drivers/scsi/esas2r
AUDIT SUBSYSTEM
M: Eric Paris <eparis@redhat.com>
@@ -1737,6 +1738,7 @@
BLACKFIN ARCHITECTURE
M: Steven Miao <realmz6@gmail.com>
L: adi-buildroot-devel@lists.sourceforge.net
+T: git git://git.code.sf.net/p/adi-linux/code
W: http://blackfin.uclinux.org
S: Supported
F: arch/blackfin/
@@ -1830,8 +1832,8 @@
F: include/net/bluetooth/
BONDING DRIVER
-M: Jay Vosburgh <fubar@us.ibm.com>
-M: Veaceslav Falico <vfalico@redhat.com>
+M: Jay Vosburgh <j.vosburgh@gmail.com>
+M: Veaceslav Falico <vfalico@gmail.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
@@ -2159,7 +2161,7 @@
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@freescale.com>
-T: git://github.com/hzpeterchen/linux-usb.git
+T: git git://github.com/hzpeterchen/linux-usb.git
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/chipidea/
@@ -2179,9 +2181,9 @@
F: drivers/net/ethernet/cisco/enic/
CISCO VIC LOW LATENCY NIC DRIVER
-M: Upinder Malhi <umalhi@cisco.com>
-S: Supported
-F: drivers/infiniband/hw/usnic
+M: Upinder Malhi <umalhi@cisco.com>
+S: Supported
+F: drivers/infiniband/hw/usnic
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2378,20 +2380,20 @@
F: drivers/cpufreq/arm_big_little_dt.c
CPUIDLE DRIVER - ARM BIG LITTLE
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
-L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-S: Maintained
-F: drivers/cpuidle/cpuidle-big_little.c
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Daniel Lezcano <daniel.lezcano@linaro.org>
+L: linux-pm@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+S: Maintained
+F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVERS
M: Rafael J. Wysocki <rjw@rjwysocki.net>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
@@ -2458,9 +2460,9 @@
F: sound/pci/cs5535audio/
CW1200 WLAN driver
-M: Solomon Peachy <pizza@shaftnet.org>
-S: Maintained
-F: drivers/net/wireless/cw1200/
+M: Solomon Peachy <pizza@shaftnet.org>
+S: Maintained
+F: drivers/net/wireless/cw1200/
CX18 VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
@@ -3095,6 +3097,8 @@
EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com>
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <m.chehab@samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Supported
@@ -4541,6 +4545,7 @@
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Mitch Williams <mitch.a.williams@intel.com>
+M: Linux NICS <linux.nics@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
@@ -4558,6 +4563,7 @@
F: Documentation/networking/i40e.txt
F: Documentation/networking/i40evf.txt
F: drivers/net/ethernet/intel/
+F: drivers/net/ethernet/intel/*/
INTEL-MID GPIO DRIVER
M: David Cohen <david.a.cohen@linux.intel.com>
@@ -4914,7 +4920,7 @@
KCONFIG
M: "Yann E. MORIN" <yann.morin.1998@free.fr>
L: linux-kbuild@vger.kernel.org
-T: git://gitorious.org/linux-kconfig/linux-kconfig
+T: git git://gitorious.org/linux-kconfig/linux-kconfig
S: Maintained
F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
@@ -5471,11 +5477,11 @@
F: drivers/media/tuners/m88ts2022*
MA901 MASTERKIT USB FM RADIO DRIVER
-M: Alexey Klimov <klimov.linux@gmail.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/radio/radio-ma901.c
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-ma901.c
MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
@@ -5636,7 +5642,7 @@
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
-L: netdev@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -5677,7 +5683,7 @@
F: include/uapi/mtd/
MEN A21 WATCHDOG DRIVER
-M: Johannes Thumshirn <johannes.thumshirn@men.de>
+M: Johannes Thumshirn <johannes.thumshirn@men.de>
L: linux-watchdog@vger.kernel.org
S: Supported
F: drivers/watchdog/mena21_wdt.c
@@ -5733,20 +5739,20 @@
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
Mellanox MLX5 IB driver
-M: Eli Cohen <eli@mellanox.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
-S: Supported
-F: include/linux/mlx5/
-F: drivers/infiniband/hw/mlx5/
+M: Eli Cohen <eli@mellanox.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+T: git git://openfabrics.org/~eli/connect-ib.git
+S: Supported
+F: include/linux/mlx5/
+F: drivers/infiniband/hw/mlx5/
MODULE SUPPORT
M: Rusty Russell <rusty@rustcorp.com.au>
@@ -5998,6 +6004,9 @@
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
+F: tools/net/
+F: tools/testing/selftests/net/
+F: lib/random32.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
@@ -6171,6 +6180,12 @@
F: drivers/block/nvme*
F: include/linux/nvme.h
+NXP TDA998X DRM DRIVER
+M: Russell King <rmk+kernel@arm.linux.org.uk>
+S: Supported
+F: drivers/gpu/drm/i2c/tda998x_drv.c
+F: include/drm/i2c/tda998x.h
+
OMAP SUPPORT
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
@@ -8700,17 +8715,17 @@
F: drivers/media/radio/radio-raremono.c
THERMAL
-M: Zhang Rui <rui.zhang@intel.com>
-M: Eduardo Valentin <eduardo.valentin@ti.com>
-L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
-Q: https://patchwork.kernel.org/project/linux-pm/list/
-S: Supported
-F: drivers/thermal/
-F: include/linux/thermal.h
-F: include/linux/cpu_cooling.h
-F: Documentation/devicetree/bindings/thermal/
+M: Zhang Rui <rui.zhang@intel.com>
+M: Eduardo Valentin <eduardo.valentin@ti.com>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+Q: https://patchwork.kernel.org/project/linux-pm/list/
+S: Supported
+F: drivers/thermal/
+F: include/linux/thermal.h
+F: include/linux/cpu_cooling.h
+F: Documentation/devicetree/bindings/thermal/
THINGM BLINK(1) USB RGB LED DRIVER
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9812,7 +9827,7 @@
L: mjpeg-users@lists.sourceforge.net
L: linux-media@vger.kernel.org
W: http://mjpeg.sourceforge.net/driver-zoran/
-T: Mercurial http://linuxtv.org/hg/v4l-dvb
+T: hg http://linuxtv.org/hg/v4l-dvb
S: Odd Fixes
F: drivers/media/pci/zoran/
diff --git a/Makefile b/Makefile
index 78209ee..c10b734 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 6b58c1d..400c663 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -282,7 +282,7 @@
#else
/* if V-P const for loop, PTAG can be written once outside loop */
if (full_page_op)
- write_aux_reg(ARC_REG_DC_PTAG, paddr);
+ write_aux_reg(aux_tag, paddr);
#endif
while (num_lines-- > 0) {
@@ -296,7 +296,7 @@
write_aux_reg(aux_cmd, vaddr);
vaddr += L1_CACHE_BYTES;
#else
- write_aux_reg(aux, paddr);
+ write_aux_reg(aux_cmd, paddr);
paddr += L1_CACHE_BYTES;
#endif
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e254198..1594945 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1578,6 +1578,7 @@
choice
prompt "Memory split"
+ depends on MMU
default VMSPLIT_3G
help
Select the desired split between kernel and user memory.
@@ -1595,6 +1596,7 @@
config PAGE_OFFSET
hex
+ default PHYS_OFFSET if !MMU
default 0x40000000 if VMSPLIT_1G
default 0x80000000 if VMSPLIT_2G
default 0xC0000000
@@ -1903,6 +1905,7 @@
depends on ARM && AEABI && OF
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
+ depends on MMU
select ARM_PSCI
select SWIOTLB_XEN
select ARCH_DMA_ADDR_T_64BIT
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index 47279aa..0714e03 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
ashldi3.S
+bswapsdi2.S
font.c
lib1funcs.S
hyp-stub.S
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index e491b82..792fde1 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -147,7 +147,7 @@
};
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi
index 2363593..ef58d1c 100644
--- a/arch/arm/boot/dts/keystone-clocks.dtsi
+++ b/arch/arm/boot/dts/keystone-clocks.dtsi
@@ -612,7 +612,7 @@
compatible = "ti,keystone,psc-clock";
clocks = <&chipclk13>;
clock-output-names = "vcp-3";
- reg = <0x0235000a8 0xb00>, <0x02350060 0x400>;
+ reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
reg-names = "control", "domain";
domain-id = <24>;
};
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
index c551e4a..d3b253b 100644
--- a/arch/arm/boot/dts/omap3-gta04.dts
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -13,7 +13,7 @@
/ {
model = "OMAP3 GTA04";
- compatible = "ti,omap3-gta04", "ti,omap3";
+ compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 25a2b5f..f2779ac 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -14,7 +14,7 @@
/ {
model = "IGEPv2 (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020", "ti,omap3";
+ compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
leds {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 145c58c..2793749 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -13,7 +13,7 @@
/ {
model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030", "ti,omap3";
+ compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
leds {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi
index 6c31c26..db58cad 100644
--- a/arch/arm/boot/dts/sama5d36.dtsi
+++ b/arch/arm/boot/dts/sama5d36.dtsi
@@ -8,8 +8,8 @@
*/
#include "sama5d3.dtsi"
#include "sama5d3_can.dtsi"
-#include "sama5d3_emac.dtsi"
#include "sama5d3_gmac.dtsi"
+#include "sama5d3_emac.dtsi"
#include "sama5d3_lcd.dtsi"
#include "sama5d3_mci2.dtsi"
#include "sama5d3_tcb1.dtsi"
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 10666ca..d4d2763 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -426,7 +426,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 6496159..79fd412 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -383,7 +383,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 320335a..c463fd7 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -346,7 +346,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 9ff0948..6f25cf5 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -454,7 +454,7 @@
rtc: rtc@01c20d00 {
compatible = "allwinner,sun7i-a20-rtc";
reg = <0x01c20d00 0x20>;
- interrupts = <0 24 1>;
+ interrupts = <0 24 4>;
};
sid: eeprom@01c23800 {
@@ -463,7 +463,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <0 29 4>;
};
@@ -596,10 +596,10 @@
hstimer@01c60000 {
compatible = "allwinner,sun7i-a20-hstimer";
reg = <0x01c60000 0x1000>;
- interrupts = <0 81 1>,
- <0 82 1>,
- <0 83 1>,
- <0 84 1>;
+ interrupts = <0 81 4>,
+ <0 82 4>,
+ <0 83 4>,
+ <0 84 4>;
clocks = <&ahb_gates 28>;
};
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 00fe9e9..27d69b5 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -204,7 +204,10 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_ONESHOT=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4b..4afb376 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
*/
#define UL(x) _AC(x, UL)
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
@@ -104,10 +105,6 @@
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET PLAT_PHYS_OFFSET
-#endif
-
/*
* The module can be at any place in ram in nommu mode.
*/
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 47cd974..c96ecac 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -177,6 +177,18 @@
.long __proc_info_end
.size __lookup_processor_type_data, . - __lookup_processor_type_data
+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_lpae
+ bl printascii
+ b __error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+ b __error
+#endif
+ .align
+ENDPROC(__error_lpae)
+
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 914616e..f5f381d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,7 +102,7 @@
and r3, r3, #0xf @ extract VMSA support
cmp r3, #5 @ long-descriptor translation table format?
THUMB( it lo ) @ force fixup-able long branch encoding
- blo __error_p @ only classic page table format
+ blo __error_lpae @ only classic page table format
#endif
#ifndef CONFIG_XIP_KERNEL
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 3b05aea..11ed915 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -433,7 +433,9 @@
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
+ .set_rate = &omap3_clkoutx2_set_rate,
.recalc_rate = &omap3_clkoutx2_recalc,
+ .round_rate = &omap3_clkoutx2_round_rate,
};
static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c158c8..01fc710 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -23,6 +23,8 @@
#include "prm.h"
#include "clockdomain.h"
+#define MAX_CPUS 2
+
/* Machine specific information */
struct idle_statedata {
u32 cpu_state;
@@ -48,11 +50,11 @@
},
};
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];
static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Private functions */
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 3185ced..3c418ea 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -623,25 +623,12 @@
/* Clock control for DPLL outputs */
-/**
- * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
- *
- * Using parent clock DPLL data, look up DPLL state. If locked, set our
- * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
- */
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
- unsigned long parent_rate)
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
- const struct dpll_data *dd;
- unsigned long rate;
- u32 v;
struct clk_hw_omap *pclk = NULL;
struct clk *parent;
- if (!parent_rate)
- return 0;
-
/* Walk up the parents of clk, looking for a DPLL */
do {
do {
@@ -656,9 +643,35 @@
/* clk does not have a DPLL as a parent? error in the clock data */
if (!pclk) {
WARN_ON(1);
- return 0;
+ return NULL;
}
+ return pclk;
+}
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @clk: DPLL output struct clk
+ *
+ * Using parent clock DPLL data, look up DPLL state. If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ const struct dpll_data *dd;
+ unsigned long rate;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!parent_rate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
dd = pclk->dpll_data;
WARN_ON(!dd->enable_mask);
@@ -672,6 +685,55 @@
return rate;
}
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ const struct dpll_data *dd;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!*prate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
+ dd = pclk->dpll_data;
+
+ /* TYPE J does not have a clkoutx2 */
+ if (dd->flags & DPLL_J_TYPE) {
+ *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
+ return *prate;
+ }
+
+ WARN_ON(!dd->enable_mask);
+
+ v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ /* If in bypass, the rate is fixed to the bypass rate*/
+ if (v != OMAP3XXX_EN_DPLL_LOCKED)
+ return *prate;
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / 2);
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return *prate * 2;
+}
+
/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
.allow_idle = omap3_dpll_allow_idle,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 42d8188..1f33f5d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1947,29 +1947,31 @@
goto dis_opt_clks;
_write_sysconfig(v, oh);
+
+ if (oh->class->sysc->srst_udelay)
+ udelay(oh->class->sysc->srst_udelay);
+
+ c = _wait_softreset_complete(oh);
+ if (c == MAX_MODULE_SOFTRESET_WAIT) {
+ pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
+ oh->name, MAX_MODULE_SOFTRESET_WAIT);
+ ret = -ETIMEDOUT;
+ goto dis_opt_clks;
+ } else {
+ pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+ }
+
ret = _clear_softreset(oh, &v);
if (ret)
goto dis_opt_clks;
_write_sysconfig(v, oh);
- if (oh->class->sysc->srst_udelay)
- udelay(oh->class->sysc->srst_udelay);
-
- c = _wait_softreset_complete(oh);
- if (c == MAX_MODULE_SOFTRESET_WAIT)
- pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
- oh->name, MAX_MODULE_SOFTRESET_WAIT);
- else
- pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
-
/*
* XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
* _wait_target_ready() or _reset()
*/
- ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
dis_opt_clks:
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_disable_optional_clocks(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c..810c205 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1365,11 +1365,10 @@
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d5b24d..c33e07e 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -22,6 +22,8 @@
#include "common-board-devices.h"
#include "dss-common.h"
#include "control.h"
+#include "omap-secure.h"
+#include "soc.h"
struct pdata_init {
const char *compatible;
@@ -169,6 +171,22 @@
omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
}
+
+static void __init nokia_n900_legacy_init(void)
+{
+ hsmmc2_internal_input_clk();
+
+ if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+ if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
+ pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+ /* set IBE to 1 */
+ rx51_secure_update_aux_cr(BIT(6), 0);
+ } else {
+ pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
+ pr_warning("Thumb binaries may crash randomly without this workaround\n");
+ }
+ }
+}
#endif /* CONFIG_ARCH_OMAP3 */
#ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@
#endif
#ifdef CONFIG_ARCH_OMAP3
OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+ OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
/* Only on am3517 */
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@
static struct pdata_init pdata_quirks[] __initdata = {
#ifdef CONFIG_ARCH_OMAP3
{ "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
- { "nokia,omap3-n900", hsmmc2_internal_input_clk, },
+ { "nokia,omap3-n900", nokia_n900_legacy_init, },
{ "nokia,omap3-n9", hsmmc2_internal_input_clk, },
{ "nokia,omap3-n950", hsmmc2_internal_input_clk, },
{ "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 6334b96..280f3c5 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -183,11 +183,11 @@
OMAP4_PRM_RSTCTRL_OFFSET);
v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
- OMAP4430_PRM_DEVICE_INST,
+ dev_inst,
OMAP4_PRM_RSTCTRL_OFFSET);
/* OCP barrier */
v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
- OMAP4430_PRM_DEVICE_INST,
+ dev_inst,
OMAP4_PRM_RSTCTRL_OFFSET);
}
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d..50e1d85 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
#ifndef __ASM_ARCH_COLLIE_H
#define __ASM_ARCH_COLLIE_H
+#include "hardware.h" /* Gives GPIO_MAX */
+
extern void locomolcd_power(int on);
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 2b3a564..ef69152 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -264,6 +264,9 @@
note_page(st, addr, 3, pmd_val(*pmd));
else
walk_pte(st, pmd, addr);
+
+ if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+ note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
}
}
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 09c5a0f..86648c0 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -12,6 +12,7 @@
#define _ASM_C6X_CACHE_H
#include <linux/irqflags.h>
+#include <linux/init.h>
/*
* Cache line size
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066c..053c17b 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@
* definition, which doesn't have the same semantics. We don't want to
* use -fno-builtin, so just hide the name ffs.
*/
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf8..20e8a9b 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@
/* attempt to allocate a granule's worth of cached memory pages */
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
if (!page) {
mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dcae3a7..95fa1f1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1776,12 +1776,12 @@
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
- default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
- range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
- default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
- range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
- default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
+ range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+ default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
range 11 64
default "11"
help
@@ -2353,9 +2353,8 @@
If unsure, say Y. Only embedded should say N here.
config MIPS_O32_FP64_SUPPORT
- bool "Support for O32 binaries using 64-bit FP"
+ bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
depends on 32BIT || MIPS32_O32
- default y
help
When this is enabled, the kernel will support use of 64-bit floating
point registers with binaries using the O32 ABI along with the
@@ -2367,7 +2366,14 @@
of your kernel & potentially improve FP emulation performance by
saying N here.
- If unsure, say Y.
+ Although binutils currently supports use of this flag the details
+ concerning its effect upon the O32 ABI in userland are still being
+ worked on. In order to avoid userland becoming dependant upon current
+ behaviour before the details have been finalised, this option should
+ be considered experimental and only enabled by those working upon
+ said details.
+
+ If unsure, say N.
config USE_OF
bool
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index 9edc35f..acf9a2a 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -53,10 +53,8 @@
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
- if (!memsize_str)
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
- else
- strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 9969dba..25a59a2 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -52,10 +52,8 @@
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
- if (!memsize_str)
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
- else
- strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 6d612e2..cdd8246 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -1,3 +1,4 @@
+#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <bcm47xx_board.h>
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 6decb27..2bed73a 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -196,7 +196,7 @@
char nvram_var[10];
char buf[30];
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 32; i++) {
err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
if (err <= 0)
continue;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 25fbfae..c2bb4f8 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -975,10 +975,6 @@
if (ciu > 1 || bit > 63)
return -EINVAL;
- /* These are the GPIO lines */
- if (ciu == 0 && bit >= 16 && bit < 32)
- return -EINVAL;
-
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@@ -1007,6 +1003,10 @@
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
+ /* Don't map irq if it is reserved for GPIO. */
+ if (line == 0 && bit >= 16 && bit <32)
+ return 0;
+
if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1525,10 +1525,6 @@
ciu = intspec[0];
bit = intspec[1];
- /* Line 7 are the GPIO lines */
- if (ciu > 6 || bit > 63)
- return -EINVAL;
-
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@@ -1570,8 +1566,14 @@
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
- /* Line 7 are the GPIO lines */
- if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
+ /*
+ * Don't map irq if it is reserved for GPIO.
+ * (Line 7 are the GPIO lines.)
+ */
+ if (line == 7)
+ return 0;
+
+ if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
if (octeon_irq_ciu2_is_edge(line, bit))
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 3220c93..4225e99 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -9,6 +9,7 @@
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
+#include <asm/asm-offsets.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
@@ -54,11 +55,21 @@
.endm
.macro local_irq_disable reg=t0
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, 1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, -1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
@@ -106,7 +117,7 @@
.endm
.macro fpu_save_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -159,7 +170,7 @@
.endm
.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 6b97495..58e50cb 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -57,7 +57,7 @@
return 0;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index ce35c9a..992aaba 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -22,12 +22,12 @@
#define safe_load(load, src, dst, error) \
do { \
asm volatile ( \
- "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -35,8 +35,8 @@
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [dst] "=&r" (dst), [error] "=r" (error)\
- : [src] "r" (src) \
+ : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
+ : [tmp_src] "r" (src) \
: "memory" \
); \
} while (0)
@@ -44,12 +44,12 @@
#define safe_store(store, src, dst, error) \
do { \
asm volatile ( \
- "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -57,8 +57,8 @@
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [error] "=r" (error) \
- : [dst] "r" (dst), [src] "r" (src)\
+ : [tmp_err] "=r" (error) \
+ : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
: "memory" \
); \
} while (0)
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 33e8dbf..f35b131 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -13,6 +13,7 @@
#ifndef __ASM_MIPS_SYSCALL_H
#define __ASM_MIPS_SYSCALL_H
+#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/elf-em.h>
#include <linux/kernel.h>
@@ -39,14 +40,14 @@
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_thread_flag(TIF_32BIT_REGS))
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
@@ -57,6 +58,8 @@
default:
BUG();
}
+
+ unreachable();
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -83,11 +86,10 @@
unsigned int i, unsigned int n,
unsigned long *args)
{
- unsigned long arg;
int ret;
while (n--)
- ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
/*
* No way to communicate an error because this is a void function.
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index b39ba25..f25181b 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -163,8 +163,8 @@
*/
enum cop1x_func {
lwxc1_op = 0x00, ldxc1_op = 0x01,
- pfetch_op = 0x07, swxc1_op = 0x08,
- sdxc1_op = 0x09, madd_s_op = 0x20,
+ swxc1_op = 0x08, sdxc1_op = 0x09,
+ pfetch_op = 0x0f, madd_s_op = 0x20,
madd_d_op = 0x21, madd_e_op = 0x22,
msub_s_op = 0x28, msub_d_op = 0x29,
msub_e_op = 0x2a, nmadd_s_op = 0x30,
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba25..374ed74 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -111,11 +111,10 @@
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
- ip += 4;
- safe_store_code(new_code2, ip, faulted);
+ safe_store_code(new_code2, ip + 4, faulted);
if (unlikely(faulted))
return -EFAULT;
- flush_icache_range(ip, ip + 8); /* original ip + 12 */
+ flush_icache_range(ip, ip + 8);
return 0;
}
#endif
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 253b2fb..73b0ddf 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,9 +35,9 @@
LEAF(_save_fp_context)
cfc1 t1, fcr31
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -146,11 +146,11 @@
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
- EX lw t0, SC_FPC_CSR(a0)
+ EX lw t1, SC_FPC_CSR(a0)
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -191,7 +191,7 @@
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context)
@@ -199,7 +199,7 @@
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
- EX lw t0, SC32_FPC_CSR(a0)
+ EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -239,7 +239,7 @@
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context32)
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 56dc696..758fb3c 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -112,5 +112,8 @@
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 91d61ba..9c1aca0 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -144,5 +144,8 @@
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 506925b..0b4e2e3 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1538,10 +1538,10 @@
break;
}
- case 0x7: /* 7 */
- if (MIPSInst_FUNC(ir) != pfetch_op) {
+ case 0x3:
+ if (MIPSInst_FUNC(ir) != pfetch_op)
return SIGILL;
- }
+
/* ignore prefx operation */
break;
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 592ac04..84ac523 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -72,7 +72,7 @@
return 0;
}
-#ifdef CONFIG_MIPS_VPE_LOADER
+#ifdef CONFIG_MIPS_VPE_LOADER_CMP
int vpe_run(struct vpe *v)
{
struct vpe_notifications *n;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index ca3e3a4..2242181 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -119,7 +119,7 @@
do_IRQ(MALTA_INT_BASE + irq);
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_MT
if (aprp_hook)
aprp_hook();
#endif
@@ -310,7 +310,7 @@
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
if (aprp_hook)
aprp_hook();
#endif
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index d37be36..2b91b0e 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -150,6 +150,7 @@
msg.address_lo =
((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
+ break;
case OCTEON_DMA_BAR_TYPE_BIG:
/* When using big bar, Bar 0 is based at 0 */
msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 637fe03..60d5d17 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -32,17 +32,6 @@
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
-/* #define CONFIG_PARISC_TMPALIAS */
-
-#ifdef CONFIG_PARISC_TMPALIAS
-void clear_user_highpage(struct page *page, unsigned long vaddr);
-#define clear_user_highpage clear_user_highpage
-struct vm_area_struct;
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma);
-#define __HAVE_ARCH_COPY_USER_HIGHPAGE
-#endif
-
/*
* These are used to make use of C type-checking..
*/
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 3516e0b..64f2992 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -191,8 +191,4 @@
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 4270679..265ae51 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -828,13 +828,13 @@
#define __NR_finit_module (__NR_Linux + 333)
#define __NR_sched_setattr (__NR_Linux + 334)
#define __NR_sched_getattr (__NR_Linux + 335)
+#define __NR_utimes (__NR_Linux + 336)
-#define __NR_Linux_syscalls (__NR_sched_getattr + 1)
+#define __NR_Linux_syscalls (__NR_utimes + 1)
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-#define __IGNORE_utimes /* utime */
#define HPUX_GATEWAY_ADDR 0xC0000004
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ac87a40..a6ffc77 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -581,67 +581,3 @@
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
-
-#ifdef CONFIG_PARISC_TMPALIAS
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *vto;
- unsigned long flags;
-
- /* Clear using TMPALIAS region. The page doesn't need to
- be flushed but the kernel mapping needs to be purged. */
-
- vto = kmap_atomic(page);
-
- /* The PA-RISC 2.0 Architecture book states on page F-6:
- "Before a write-capable translation is enabled, *all*
- non-equivalently-aliased translations must be removed
- from the page table and purged from the TLB. (Note
- that the caches are not required to be flushed at this
- time.) Before any non-equivalent aliased translation
- is re-enabled, the virtual address range for the writeable
- page (the entire page) must be flushed from the cache,
- and the write-capable translation removed from the page
- table and purged from the TLB." */
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- purge_tlb_end(flags);
- preempt_disable();
- clear_user_page_asm(vto, vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- void *vfrom, *vto;
- unsigned long flags;
-
- /* Copy using TMPALIAS region. This has the advantage
- that the `from' page doesn't need to be flushed. However,
- the `to' page must be flushed in copy_user_page_asm since
- it can be used to bring in executable code. */
-
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- pdtlb_kernel(vfrom);
- purge_tlb_end(flags);
- preempt_disable();
- copy_user_page_asm(vto, vfrom, vaddr);
- flush_dcache_page_asm(__pa(vto), vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-#endif /* CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 8fa3fbb..80e5dd2 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -431,6 +431,7 @@
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
+ ENTRY_COMP(utimes)
/* Nothing yet */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d4c247f1..af064d2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1048,6 +1048,15 @@
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
+ /*
+ * Flush TM state out so we can copy it. __switch_to_tm() does this
+ * flush but it removes the checkpointed state from the current CPU and
+ * transitions the CPU out of TM mode. Hence we need to call
+ * tm_recheckpoint_new_task() (on the same task) to restore the
+ * checkpointed state back and the TM mode.
+ */
+ __switch_to_tm(src);
+ tm_recheckpoint_new_task(src);
*dst = *src;
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index 1482327..d88736f 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@
6: blr
+.balign 8
p_dyn: .llong __dynamic_start - 0b
p_rela: .llong __rela_dyn_start - 0b
p_st: .llong _stext - 0b
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e66d4ec..818dce3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1504,73 +1504,6 @@
1: addi r8,r8,16
.endr
- /* Save DEC */
- mfspr r5,SPRN_DEC
- mftb r6
- extsw r5,r5
- add r5,r5,r6
- std r5,VCPU_DEC_EXPIRES(r9)
-
-BEGIN_FTR_SECTION
- b 8f
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
- /* Save POWER8-specific registers */
- mfspr r5, SPRN_IAMR
- mfspr r6, SPRN_PSPB
- mfspr r7, SPRN_FSCR
- std r5, VCPU_IAMR(r9)
- stw r6, VCPU_PSPB(r9)
- std r7, VCPU_FSCR(r9)
- mfspr r5, SPRN_IC
- mfspr r6, SPRN_VTB
- mfspr r7, SPRN_TAR
- std r5, VCPU_IC(r9)
- std r6, VCPU_VTB(r9)
- std r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- mfspr r5, SPRN_TFHAR
- mfspr r6, SPRN_TFIAR
- mfspr r7, SPRN_TEXASR
- std r5, VCPU_TFHAR(r9)
- std r6, VCPU_TFIAR(r9)
- std r7, VCPU_TEXASR(r9)
-#endif
- mfspr r8, SPRN_EBBHR
- std r8, VCPU_EBBHR(r9)
- mfspr r5, SPRN_EBBRR
- mfspr r6, SPRN_BESCR
- mfspr r7, SPRN_CSIGR
- mfspr r8, SPRN_TACR
- std r5, VCPU_EBBRR(r9)
- std r6, VCPU_BESCR(r9)
- std r7, VCPU_CSIGR(r9)
- std r8, VCPU_TACR(r9)
- mfspr r5, SPRN_TCSCR
- mfspr r6, SPRN_ACOP
- mfspr r7, SPRN_PID
- mfspr r8, SPRN_WORT
- std r5, VCPU_TCSCR(r9)
- std r6, VCPU_ACOP(r9)
- stw r7, VCPU_GUEST_PID(r9)
- std r8, VCPU_WORT(r9)
-8:
-
- /* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
- mfspr r5,SPRN_AMR
- mfspr r6,SPRN_UAMOR
- std r5,VCPU_AMR(r9)
- std r6,VCPU_UAMOR(r9)
- li r6,0
- mtspr SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@@ -2203,7 +2136,7 @@
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
- stw r6,VCPU_VRSAVE(r3)
+ stw r6,VCPU_VRSAVE(r31)
mtlr r30
mtmsrd r5
isync
@@ -2240,7 +2173,7 @@
bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- lwz r7,VCPU_VRSAVE(r4)
+ lwz r7,VCPU_VRSAVE(r31)
mtspr SPRN_VRSAVE,r7
mtlr r30
mr r4,r31
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47..e865d74 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@
area->nid = nid;
area->order = order;
- area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+ area->pages = alloc_pages_exact_node(area->nid,
+ GFP_KERNEL|__GFP_THISNODE,
area->order);
if (!area->pages) {
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h
index 673515b..aa1b2b9 100644
--- a/arch/sh/include/cpu-sh2/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2/cpu/cache.h
@@ -18,7 +18,7 @@
#define SH_CACHE_ASSOC 8
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR 0xffffffec
+#define SH_CCR 0xffffffec
#define CCR_CACHE_CE 0x01 /* Cache enable */
#define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h
index defb0ba..b27ce92 100644
--- a/arch/sh/include/cpu-sh2a/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2a/cpu/cache.h
@@ -17,8 +17,8 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xfffc1000 /* CCR1 */
-#define CCR2 0xfffc1004
+#define SH_CCR 0xfffc1000 /* CCR1 */
+#define SH_CCR2 0xfffc1004
/*
* Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h
index bee2d81..29700fd 100644
--- a/arch/sh/include/cpu-sh3/cpu/cache.h
+++ b/arch/sh/include/cpu-sh3/cpu/cache.h
@@ -17,7 +17,7 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xffffffec /* Address of Cache Control Register */
+#define SH_CCR 0xffffffec /* Address of Cache Control Register */
#define CCR_CACHE_CE 0x01 /* Cache Enable */
#define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h
index 7bfb9e8..92c4cd1 100644
--- a/arch/sh/include/cpu-sh4/cpu/cache.h
+++ b/arch/sh/include/cpu-sh4/cpu/cache.h
@@ -17,7 +17,7 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xff00001c /* Address of Cache Control Register */
+#define SH_CCR 0xff00001c /* Address of Cache Control Register */
#define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
#define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
#define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ecf83cd..0d7360d 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -112,7 +112,7 @@
unsigned long ccr, flags;
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@
l2_cache_init();
- __raw_writel(flags, CCR);
+ __raw_writel(flags, SH_CCR);
back_to_cached();
}
#else
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 1157251..777e50f 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@
*/
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
if ((ccr & CCR_CACHE_ENABLE) == 0) {
back_to_cached();
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index defcf71..a74259f 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@
local_irq_save(flags);
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
ccr |= CCR_CACHE_INVALIDATE;
- __raw_writel(ccr, CCR);
+ __raw_writel(ccr, SH_CCR);
back_to_cached();
local_irq_restore(flags);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 949e2d3..ee87d08 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@
/* If there are too many pages then just blow the cache */
if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
- __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+ __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+ SH_CCR);
} else {
for (v = begin; v < end; v += L1_CACHE_BYTES)
sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@
/* I-Cache invalidate */
/* If there are too many pages then just blow the cache */
if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
- __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+ __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+ SH_CCR);
} else {
for (v = start; v < end; v += L1_CACHE_BYTES)
sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 0e52928..51d8f7f 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@
jump_to_uncached();
/* Flush I-cache */
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
ccr |= CCR_CACHE_ICI;
- __raw_writel(ccr, CCR);
+ __raw_writel(ccr, SH_CCR);
/*
* back_to_cached() will take care of the barrier for us, don't add
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index c0adbee..24c58b7 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@
{
unsigned int ccr;
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@
ccr |= CCR_CACHE_IBE;
#endif
- writel_uncached(ccr, CCR);
+ writel_uncached(ccr, SH_CCR);
}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 616966a..097c2cd 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -285,8 +285,8 @@
{
unsigned int cache_disabled = 0;
-#ifdef CCR
- cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+ cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif
compute_alias(&boot_cpu_data.icache);
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 32a280e..d7b4967 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -58,9 +58,12 @@
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
+ local_irq_enable();
} else {
unsigned long pstate;
+ local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -82,7 +85,6 @@
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 87729ff..33a17e7 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -189,7 +189,8 @@
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
- ba,a,pt %xcc, 3f
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -217,7 +218,6 @@
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 3b3a360..f5d506f 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -273,7 +273,7 @@
prom_halt();
}
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
unsigned long size = 8192 << i;
const char *name = tsb_cache_names[i];
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca..f3aaf23 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@
def_bool y
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
-config X86_OOSTORE
- def_bool y
- depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
-
#
# P6_NOPs are a relatively minor optimization that require a family >=
# 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a4890..69bbb48 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
#else
# define smp_rmb() barrier()
#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
+#define smp_wmb() barrier()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
@@ -100,7 +96,7 @@
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
/*
* For either of these options x86 doesn't have a strong TSO memory
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3d6b9f8..acd86c8 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -134,6 +134,7 @@
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
+extern void __init efi_apply_memmap_quirks(void);
struct efi_setup_data {
u64 fw_vendor;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb..91d9c69 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@
static inline void flush_write_buffers(void)
{
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ad38ad..bbc8b12 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,20 +445,10 @@
return a.pte == b.pte;
}
-static inline int pteval_present(pteval_t pteval)
-{
- /*
- * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
- * way clearly states that the intent is that protnone and numa
- * hinting ptes are considered present for the purposes of
- * pagetable operations like zapping, protection changes, gup etc.
- */
- return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
static inline int pte_present(pte_t a)
{
- return pteval_present(pte_flags(a));
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
}
#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156de..0f62f54 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
# define LOCK_PTR_REG "D"
#endif
-#if defined(CONFIG_X86_32) && \
- (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
* (PPro errata 66, 92)
*/
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index d35f24e..1306d11 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@
extern const struct cpumask *cpu_coregroup_mask(int cpu);
-#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index fd972a3..9fa8aa0 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -18,7 +18,6 @@
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/bitops.h>
-#include <linux/ioport.h>
#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
@@ -54,18 +53,6 @@
int fix_aperture __initdata = 1;
-static struct resource gart_resource = {
- .name = "GART",
- .flags = IORESOURCE_MEM,
-};
-
-static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
-{
- gart_resource.start = aper_base;
- gart_resource.end = aper_base + aper_size - 1;
- insert_resource(&iomem_resource, &gart_resource);
-}
-
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@@ -96,7 +83,6 @@
memblock_reserve(addr, aper_size);
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, addr);
- insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);
@@ -444,12 +430,8 @@
out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base) {
- unsigned long n = (32 * 1024 * 1024) << last_aper_order;
-
- insert_aperture_resource((u32)last_aper_base, n);
+ if (last_aper_base)
return 1;
- }
return 0;
}
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779eda..d8fba5c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
#include "cpu.h"
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 power2(u32 x)
-{
- u32 s = 1;
-
- while (s <= x)
- s <<= 1;
-
- return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
- u32 lo, hi;
-
- hi = base & ~0xFFF;
- lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
- lo &= ~0xFFF; /* Remove the ctrl value bits */
- lo |= key; /* Attribute we wish to set */
- wrmsr(reg+MSR_IDT_MCR0, lo, hi);
- mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 ramtop(void)
-{
- u32 clip = 0xFFFFFFFFUL;
- u32 top = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
-
- if (e820.map[i].addr > 0xFFFFFFFFUL)
- continue;
- /*
- * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophe already
- */
- if (e820.map[i].type == E820_RESERVED) {
- if (e820.map[i].addr >= 0x100000UL &&
- e820.map[i].addr < clip)
- clip = e820.map[i].addr;
- continue;
- }
- start = e820.map[i].addr;
- end = e820.map[i].addr + e820.map[i].size;
- if (start >= end)
- continue;
- if (end > top)
- top = end;
- }
- /*
- * Everything below 'top' should be RAM except for the ISA hole.
- * Because of the limited MCR's we want to map NV/ACPI into our
- * MCR range for gunk in RAM
- *
- * Clip might cause us to MCR insufficient RAM but that is an
- * acceptable failure mode and should only bite obscure boxes with
- * a VESA hole at 15Mb
- *
- * The second case Clip sometimes kicks in is when the EBDA is marked
- * as reserved. Again we fail safe with reasonable results
- */
- if (top > clip)
- top = clip;
-
- return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int centaur_mcr_compute(int nr, int key)
-{
- u32 mem = ramtop();
- u32 root = power2(mem);
- u32 base = root;
- u32 top = root;
- u32 floor = 0;
- int ct = 0;
-
- while (ct < nr) {
- u32 fspace = 0;
- u32 high;
- u32 low;
-
- /*
- * Find the largest block we will fill going upwards
- */
- high = power2(mem-top);
-
- /*
- * Find the largest block we will fill going downwards
- */
- low = base/2;
-
- /*
- * Don't fill below 1Mb going downwards as there
- * is an ISA hole in the way.
- */
- if (base <= 1024*1024)
- low = 0;
-
- /*
- * See how much space we could cover by filling below
- * the ISA hole
- */
-
- if (floor == 0)
- fspace = 512*1024;
- else if (floor == 512*1024)
- fspace = 128*1024;
-
- /* And forget ROM space */
-
- /*
- * Now install the largest coverage we get
- */
- if (fspace > high && fspace > low) {
- centaur_mcr_insert(ct, floor, fspace, key);
- floor += fspace;
- } else if (high > low) {
- centaur_mcr_insert(ct, top, high, key);
- top += high;
- } else if (low > 0) {
- base -= low;
- centaur_mcr_insert(ct, base, low, key);
- } else
- break;
- ct++;
- }
- /*
- * We loaded ct values. We now need to set the mask. The caller
- * must do this bit.
- */
- return ct;
-}
-
-static void centaur_create_optimal_mcr(void)
-{
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining and weak write ordered.
- *
- * To experiment with: Linux never uses stack operations for
- * mmio spaces so we could globally enable stack operation wc
- *
- * Load the registers with type 31 - full write combining, all
- * writes weakly ordered.
- */
- used = centaur_mcr_compute(6, 31);
-
- /*
- * Wipe unused MCRs
- */
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void winchip2_create_optimal_mcr(void)
-{
- u32 lo, hi;
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining, weak store ordered.
- *
- * Load the registers with type 25
- * 8 - weak write ordering
- * 16 - weak read ordering
- * 1 - write combining
- */
- used = centaur_mcr_compute(6, 25);
-
- /*
- * Mark the registers we are using.
- */
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- for (i = 0; i < used; i++)
- lo |= 1<<(9+i);
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
- /*
- * Wipe unused MCRs
- */
-
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void winchip2_unprotect_mcr(void)
-{
- u32 lo, hi;
- u32 key;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- key = (lo>>17) & 7;
- lo |= key<<6; /* replace with unlock key */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void winchip2_protect_mcr(void)
-{
- u32 lo, hi;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
#define ACE_PRESENT (1 << 6)
#define ACE_ENABLED (1 << 7)
#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
@@ -362,20 +132,6 @@
fcr_clr = DPDC;
printk(KERN_NOTICE "Disabling bugged TSC.\n");
clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
- centaur_create_optimal_mcr();
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- *
- * The C6 original lacks weak read order
- *
- * Note 0x120 is write only on Winchip 1
- */
- wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
break;
case 8:
switch (c->x86_mask) {
@@ -392,40 +148,12 @@
fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
E2MMX|EAMD3D;
fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
break;
case 9:
name = "3";
fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
E2MMX|EAMD3D;
fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
break;
default:
name = "??";
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index c88f7f4..047f540 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3334,6 +3334,8 @@
if (!pmus)
return -ENOMEM;
+ type->pmus = pmus;
+
type->unconstrainted = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
0, type->num_counters, 0, 0);
@@ -3369,7 +3371,6 @@
}
type->pmu_group = &uncore_pmu_attr_group;
- type->pmus = pmus;
return 0;
fail:
uncore_type_exit(type);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 81ba276..f36bd42 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -544,6 +544,10 @@
/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
cld
+
+ cmpl $2,(%esp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@
pop %edx
pop %ecx
pop %eax
- addl $8,%esp /* drop vector number and error code */
decl %ss:early_recursion_flag
+is_nmi:
+ addl $8,%esp /* drop vector number and error code */
iret
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e1aabdb..a468c0a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -343,6 +343,9 @@
ENTRY(early_idt_handler)
cld
+ cmpl $2,(%rsp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,early_recursion_flag(%rip)
jz 1f
incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@
popq %rdx
popq %rcx
popq %rax
- addq $16,%rsp # drop vector number and error code
decl early_recursion_flag(%rip)
+is_nmi:
+ addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e8368c6..d5dd808 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -86,10 +86,19 @@
void __kernel_fpu_end(void)
{
- if (use_eager_fpu())
- math_state_restore();
- else
+ if (use_eager_fpu()) {
+ /*
+ * For eager fpu, most of the time, tsk_used_math() is true.
+ * Restore the user math as we are done with the kernel usage.
+ * At a few instances during thread exit, signal handling, etc.,
+ * tsk_used_math() is false. Those few places will take proper
+ * actions, so we don't need to restore the math here.
+ */
+ if (likely(tsk_used_math(current)))
+ math_state_restore();
+ } else {
stts();
+ }
}
EXPORT_SYMBOL(__kernel_fpu_end);
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 7c6acd4..ff898bb 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -529,7 +529,7 @@
return;
pci_read_config_dword(nb_ht, 0x60, &val);
- node = val & 7;
+ node = pcibus_to_node(dev->bus) | (val & 7);
/*
* Some hardware may return an invalid node ID,
* so check it first:
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 06853e6..ce72964 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1239,14 +1239,8 @@
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
- /* Once setup is done above, unmap the EFI memory map on
- * mismatched firmware/kernel archtectures since there is no
- * support for runtime services.
- */
- if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
- pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
- efi_unmap_memmap();
- }
+ if (efi_enabled(EFI_BOOT))
+ efi_apply_memmap_quirks();
#endif
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8f..2de1bc0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
- if (irqchip_in_kernel(svm->vcpu.kvm)) {
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ if (irqchip_in_kernel(svm->vcpu.kvm))
return r;
- }
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
+ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
if (irr == -1)
return;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6dea040..a10c8c7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1020,13 +1020,17 @@
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
+ *
+ * This function must have noinline because both callers
+ * {,trace_}do_page_fault() have notrace on. Having this an actual function
+ * guarantees there's a function trace entry.
*/
-static void __kprobes
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+static void __kprobes noinline
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
- unsigned long address;
struct mm_struct *mm;
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1034,9 +1038,6 @@
tsk = current;
mm = tsk->mm;
- /* Get the faulting address: */
- address = read_cr2();
-
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
@@ -1248,32 +1249,50 @@
up_read(&mm->mmap_sem);
}
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
+ unsigned long address = read_cr2(); /* Get the faulting address */
enum ctx_state prev_state;
+ /*
+ * We must have this function tagged with __kprobes, notrace and call
+ * read_cr2() before calling anything else, to avoid calling any kind
+ * of tracing machinery before we've observed the CR2 value.
+ *
+ * exception_{enter,exit}() contain all sorts of tracepoints.
+ */
+
prev_state = exception_enter();
- __do_page_fault(regs, error_code);
+ __do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
{
if (user_mode(regs))
- trace_page_fault_user(read_cr2(), regs, error_code);
+ trace_page_fault_user(address, regs, error_code);
else
- trace_page_fault_kernel(read_cr2(), regs, error_code);
+ trace_page_fault_kernel(address, regs, error_code);
}
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
+ /*
+ * The exception_enter and tracepoint processing could
+ * trigger another page faults (user space callchain
+ * reading) and destroy the original cr2 value, so read
+ * the faulting address now.
+ */
+ unsigned long address = read_cr2();
enum ctx_state prev_state;
prev_state = exception_enter();
- trace_page_fault_entries(regs, error_code);
- __do_page_fault(regs, error_code);
+ trace_page_fault_entries(address, regs, error_code);
+ __do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
+#endif /* CONFIG_TRACING */
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1..0149575 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
- mov $SIZE,%ecx; /* size */ \
+ mov $SIZE,%edx; /* size */ \
call bpf_internal_load_pointer_neg_helper; \
test %rax,%rax; \
pop SKBDATA; \
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1a201ac..b97acec 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -52,6 +52,7 @@
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
+#include <asm/uv/uv.h>
#define EFI_DEBUG
@@ -1210,3 +1211,22 @@
return 0;
}
early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+ /*
+ * Once setup is done earlier, unmap the EFI memory map on mismatched
+ * firmware/kernel architectures since there is no support for runtime
+ * services.
+ */
+ if (!efi_is_native()) {
+ pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+ efi_unmap_memmap();
+ }
+
+ /*
+ * UV doesn't support the new EFI pagetable mapping yet.
+ */
+ if (is_uv_system())
+ set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+}
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c..cc04e67 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
#define smp_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
-#ifdef CONFIG_X86_OOSTORE
-#define smp_wmb() wmb()
-#else /* CONFIG_X86_OOSTORE */
#define smp_wmb() barrier()
-#endif /* CONFIG_X86_OOSTORE */
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e..2423ef0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (pteval_present(val)) {
+ if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
diff --git a/block/blk-core.c b/block/blk-core.c
index 853f927..bfe16d5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,20 +693,11 @@
if (!uninit_q)
return NULL;
- uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
- if (!uninit_q->flush_rq)
- goto out_cleanup_queue;
-
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- goto out_free_flush_rq;
- return q;
+ blk_cleanup_queue(uninit_q);
-out_free_flush_rq:
- kfree(uninit_q->flush_rq);
-out_cleanup_queue:
- blk_cleanup_queue(uninit_q);
- return NULL;
+ return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -717,9 +708,13 @@
if (!q)
return NULL;
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
return NULL;
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ goto fail;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
@@ -742,12 +737,16 @@
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
- return NULL;
+ goto fail;
}
mutex_unlock(&q->sysfs_lock);
return q;
+
+fail:
+ kfree(q->flush_rq);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613b..dbf4502 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@
* be reused after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, at_head, true);
+ blk_mq_insert_request(rq, at_head, true, false);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b69..43e6b47 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,17 +137,20 @@
rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
- blk_mq_run_request(rq, true, false);
+ blk_mq_insert_request(rq, false, true, false);
}
-static bool blk_flush_queue_rq(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
INIT_WORK(&rq->mq_flush_work, mq_flush_run);
kblockd_schedule_work(rq->q, &rq->mq_flush_work);
return false;
} else {
- list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ if (add_front)
+ list_add(&rq->queuelist, &rq->q->queue_head);
+ else
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
return true;
}
}
@@ -193,7 +196,7 @@
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- queued = blk_flush_queue_rq(rq);
+ queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@
q->flush_rq->rq_disk = first_rq->rq_disk;
q->flush_rq->end_io = flush_end_io;
- return blk_flush_queue_rq(q->flush_rq);
+ return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -411,7 +414,7 @@
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
- blk_mq_run_request(rq, false, true);
+ blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146bef..136ef86 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
@@ -32,16 +32,16 @@
{
BUG_ON(!notifier->notify);
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd1..883f720 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@
set_bit(ctx->index_hw, hctx->ctx_map);
}
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
- bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
@@ -193,12 +193,6 @@
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
- gfp_t gfp, bool reserved)
-{
- return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
int rw, gfp_t gfp,
bool reserved)
@@ -289,38 +283,10 @@
__blk_mq_free_request(hctx, ctx, rq);
}
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
{
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
-
- if (unlikely(rq->cmd_flags & REQ_QUIET))
- set_bit(BIO_QUIET, &bio->bi_flags);
-
- /* don't actually finish bio if it's part of flush sequence */
- if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
- bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
-{
- struct bio *bio = rq->bio;
- unsigned int bytes = 0;
-
- trace_block_rq_complete(rq->q, rq);
-
- while (bio) {
- struct bio *next = bio->bi_next;
-
- bio->bi_next = NULL;
- bytes += bio->bi_iter.bi_size;
- blk_mq_bio_endio(rq, bio, error);
- bio = next;
- }
-
- blk_account_io_completion(rq, bytes);
+ if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+ return true;
blk_account_io_done(rq);
@@ -328,8 +294,9 @@
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
+ return false;
}
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
static void __blk_mq_complete_request_remote(void *data)
{
@@ -730,60 +697,27 @@
blk_mq_add_timer(rq);
}
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool at_head, bool run_queue)
-{
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
-
- ctx = rq->mq_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
- blk_insert_flush(rq);
- } else {
- current_ctx = blk_mq_get_ctx(q);
-
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq->mq_ctx = ctx;
- }
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, at_head);
- spin_unlock(&ctx->lock);
-
- blk_mq_put_ctx(current_ctx);
- }
-
- if (run_queue)
- __blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+ bool async)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
current_ctx = blk_mq_get_ctx(q);
+ if (!cpu_online(ctx->cpu))
+ rq->mq_ctx = ctx = current_ctx;
- ctx = rq->mq_ctx;
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- rq->mq_ctx = ctx;
- }
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- /* ctx->cpu might be offline */
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, false);
- spin_unlock(&ctx->lock);
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+ !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+ blk_insert_flush(rq);
+ } else {
+ spin_lock(&ctx->lock);
+ __blk_mq_insert_request(hctx, rq, at_head);
+ spin_unlock(&ctx->lock);
+ }
blk_mq_put_ctx(current_ctx);
@@ -926,6 +860,8 @@
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ if (is_sync)
+ rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035c..72beba1 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@
};
void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41a..d7d32c2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+ * when trying to clear the EC */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -116,6 +118,7 @@
static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
Transaction Management
@@ -440,6 +443,29 @@
EXPORT_SYMBOL(ec_get_handle);
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+ int i, status;
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (status || !value)
+ break;
+ }
+
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+ else
+ pr_info("%d stale EC events cleared\n", i);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@
mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+ if (EC_FLAGS_CLEAR_ON_RESUME)
+ acpi_ec_clear(ec);
+
mutex_unlock(&ec->mutex);
}
@@ -821,6 +851,13 @@
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+ /* Clear stale _Q events if hardware might require that */
+ if (EC_FLAGS_CLEAR_ON_RESUME) {
+ mutex_lock(&ec->mutex);
+ acpi_ec_clear(ec);
+ mutex_unlock(&ec->mutex);
+ }
return ret;
}
@@ -922,6 +959,30 @@
return 0;
}
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing EC poll on resume.\n");
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
+ return 0;
+}
+
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc..0bdacc5 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
+ if (!memory24->address_length)
+ return false;
acpi_dev_get_memresource(res, memory24->minimum,
memory24->address_length,
memory24->write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
+ if (!memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
memory32->write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
+ if (!fixed_memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
fixed_memory32->write_protect);
@@ -144,12 +150,16 @@
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
+ if (!io->address_length)
+ return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
io->io_decode);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
+ if (!fixed_io->address_length)
+ return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
ACPI_DECODE_10);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b718806..c40fb2e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -71,6 +71,17 @@
return 0;
}
+static bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
+ return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
+ || (acpi_gbl_FADT.sleep_control.address
+ && acpi_gbl_FADT.sleep_status.address));
+}
+
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
@@ -604,15 +615,9 @@
{
int i;
- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
+ if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- }
- }
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
@@ -740,11 +745,7 @@
static void acpi_sleep_hibernate_setup(void)
{
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
- if (ACPI_FAILURE(status))
+ if (!acpi_sleep_state_supported(ACPI_STATE_S4))
return;
hibernation_set_ops(old_suspend_ordering ?
@@ -793,8 +794,6 @@
int __init acpi_sleep_init(void)
{
- acpi_status status;
- u8 type_a, type_b;
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;
@@ -806,8 +805,7 @@
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
- status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
sleep_states[ACPI_STATE_S5] = 1;
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1..8cb2522 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4175,6 +4175,7 @@
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8184451..422b7d8 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv.bv_page);
+ page = compound_head(bv.bv_page);
atomic_inc(&page->_count);
}
}
@@ -887,7 +887,7 @@
struct bvec_iter iter;
bio_for_each_segment(bv, bio, iter) {
- page = compound_trans_head(bv.bv_page);
+ page = compound_head(bv.bv_page);
atomic_dec(&page->_count);
}
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 5160269..d777bb7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4498,7 +4498,7 @@
}
dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
- cpu_to_node(smp_processor_id()), smp_processor_id());
+ cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b52e9a6..54174cb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
/* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS 8
+#define MTIP_MAX_UNALIGNED_SLOTS 2
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 011e55d..51c557c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -612,6 +612,8 @@
disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(disksize);
+ if (!meta)
+ return -ENOMEM;
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd313f7..c1af80b 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -242,7 +242,7 @@
irq = irq_of_parse_and_map(np, 0);
if (!irq)
- return;
+ goto out_free_characteristics;
clk = at91_clk_register_master(pmc, irq, name, num_parents,
parent_names, layout,
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6a934a5..05e04ce 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -494,6 +494,9 @@
static int __init nomadik_src_clk_init_debugfs(void)
{
+ /* Vital for multiplatform */
+ if (!src_base)
+ return -ENODEV;
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5517944..c42e608 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2226,24 +2226,25 @@
*/
int __clk_get(struct clk *clk)
{
- if (clk && !try_module_get(clk->owner))
- return 0;
+ if (clk) {
+ if (!try_module_get(clk->owner))
+ return 0;
- kref_get(&clk->ref);
+ kref_get(&clk->ref);
+ }
return 1;
}
void __clk_put(struct clk *clk)
{
- if (WARN_ON_ONCE(IS_ERR(clk)))
+ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_prepare_lock();
kref_put(&clk->ref, __clk_release);
clk_prepare_unlock();
- if (clk)
- module_put(clk->owner);
+ module_put(clk->owner);
}
/*** clk rate change notifiers ***/
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 17a5983..86f1e36 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -179,6 +179,7 @@
init.name = name;
init.ops = &clk_psc_ops;
+ init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 81a202d..bef198a 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -141,13 +141,6 @@
.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
};
-static void __init a370_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
- a370_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -168,9 +161,15 @@
{ }
};
-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, a370_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+ mvebu_coreclk_setup(np, &a370_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, a370_gating_desc);
}
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
- a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 9922c44..b309431 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -158,13 +158,6 @@
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
};
-static void __init axp_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
- axp_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -202,9 +195,14 @@
{ }
};
-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, axp_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+ mvebu_coreclk_setup(np, &axp_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, axp_gating_desc);
}
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
- axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 38aee1e..b8c2424 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -154,12 +154,6 @@
.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
};
-static void __init dove_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -186,9 +180,14 @@
{ }
};
-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, dove_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+ mvebu_coreclk_setup(np, &dove_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, dove_gating_desc);
}
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
- dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 2636a55..ddb666a 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -193,13 +193,6 @@
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
- kirkwood_coreclk_init);
-
static const struct coreclk_soc_desc mv88f6180_coreclks = {
.get_tclk_freq = kirkwood_get_tclk_freq,
.get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
- mv88f6180_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -239,9 +225,21 @@
{ }
};
-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+ if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+ mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+ else
+ mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
}
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
- kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+ kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+ kirkwood_clk_init);
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index a59ec21..99c27b1 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -26,6 +26,8 @@
void __iomem *reg;
};
+#define CPG_FRQCRB 0x00000004
+#define CPG_FRQCRB_KICK BIT(31)
#define CPG_SDCKCR 0x00000074
#define CPG_PLL0CR 0x000000d8
#define CPG_FRQCRC 0x000000e0
@@ -45,6 +47,7 @@
struct cpg_z_clk {
struct clk_hw hw;
void __iomem *reg;
+ void __iomem *kick_reg;
};
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
@@ -83,17 +86,45 @@
{
struct cpg_z_clk *zclk = to_z_clk(hw);
unsigned int mult;
- u32 val;
+ u32 val, kick;
+ unsigned int i;
mult = div_u64((u64)rate * 32, parent_rate);
mult = clamp(mult, 1U, 32U);
+ if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
val = clk_readl(zclk->reg);
val &= ~CPG_FRQCRC_ZFC_MASK;
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
clk_writel(val, zclk->reg);
- return 0;
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = clk_readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ clk_writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+	 * Since this value might depend on the external xtal rate, the pll1
+	 * rate or even the other emulation clock rates, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
}
static const struct clk_ops cpg_z_clk_ops = {
@@ -120,6 +151,7 @@
init.num_parents = 1;
zclk->reg = cpg->reg + CPG_FRQCRC;
+ zclk->kick_reg = cpg->reg + CPG_FRQCRB;
zclk->hw.init = &init;
clk = clk_register(NULL, &zclk->hw);
@@ -186,7 +218,7 @@
const char *name)
{
const struct clk_div_table *table = NULL;
- const char *parent_name = "main";
+ const char *parent_name;
unsigned int shift;
unsigned int mult = 1;
unsigned int div = 1;
@@ -201,23 +233,31 @@
* the multiplier value.
*/
u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ parent_name = "main";
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
} else if (!strcmp(name, "pll1")) {
+ parent_name = "main";
mult = config->pll1_mult / 2;
} else if (!strcmp(name, "pll3")) {
+ parent_name = "main";
mult = config->pll3_mult;
} else if (!strcmp(name, "lb")) {
+ parent_name = "pll1_div2";
div = cpg_mode & BIT(18) ? 36 : 24;
} else if (!strcmp(name, "qspi")) {
+ parent_name = "pll1_div2";
div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
- ? 16 : 20;
+ ? 8 : 10;
} else if (!strcmp(name, "sdh")) {
+ parent_name = "pll1_div2";
table = cpg_sdh_div_table;
shift = 8;
} else if (!strcmp(name, "sd0")) {
+ parent_name = "pll1_div2";
table = cpg_sd01_div_table;
shift = 4;
} else if (!strcmp(name, "sd1")) {
+ parent_name = "pll1_div2";
table = cpg_sd01_div_table;
shift = 0;
} else if (!strcmp(name, "z")) {
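The FRQCRC/FRQCRB hunks above follow a set-and-poll shape: refuse a new request while KICK is still pending, program the divider, set KICK, then poll with a bounded loop (1000 iterations per the comment) and report -ETIMEDOUT if the bit never clears. Below is a compilable stand-alone model of that shape; reg_read()/reg_write() and the fake "hardware" completion are illustrative stubs, not the CPG driver's real register accessors.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define KICK_BIT	(1u << 31)

static uint32_t frqcrb;			/* models the FRQCRB register */
static int busy_reads;			/* model: KICK stays set for a few reads */

static uint32_t reg_read(void)
{
	uint32_t v = frqcrb;

	if (busy_reads > 0 && --busy_reads == 0)
		frqcrb &= ~KICK_BIT;	/* "hardware" finished the transition */
	return v;
}

static void reg_write(uint32_t v)
{
	frqcrb = v;
	if (v & KICK_BIT)
		busy_reads = 3;		/* transition completes after 3 polls */
}

/* same return convention as the driver: 0, -EBUSY or -ETIMEDOUT */
static int kick_and_wait(uint32_t div_bits)
{
	unsigned int i;

	if (reg_read() & KICK_BIT)	/* previous transition still pending */
		return -EBUSY;

	reg_write(div_bits);			/* program the new divider */
	reg_write(reg_read() | KICK_BIT);	/* start the transition */

	for (i = 1000; i; i--)			/* bounded poll, as in the hunk */
		if (!(reg_read() & KICK_BIT))
			return 0;

	return -ETIMEDOUT;
}

int main(void)
{
	printf("kick_and_wait: %d\n", kick_and_wait(0x5));
	return 0;
}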
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 4d75b1f..290f9c1 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -59,7 +59,7 @@
return 0;
if (divider_ux1 > get_max_div(divider))
- return -EINVAL;
+ return get_max_div(divider);
return divider_ux1;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index cf0c323..c39613c 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -180,9 +180,13 @@
tegra_clk_sbc6_8,
tegra_clk_sclk,
tegra_clk_sdmmc1,
+ tegra_clk_sdmmc1_8,
tegra_clk_sdmmc2,
+ tegra_clk_sdmmc2_8,
tegra_clk_sdmmc3,
+ tegra_clk_sdmmc3_8,
tegra_clk_sdmmc4,
+ tegra_clk_sdmmc4_8,
tegra_clk_se,
tegra_clk_soc_therm,
tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 5c35885..1fa5c3f 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -371,9 +371,7 @@
static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
"pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
};
-static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
@@ -465,6 +463,10 @@
MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+ MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+ MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+ MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+ MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -492,7 +494,7 @@
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
- UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+ UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 05dce4a..feb3201 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -120,7 +120,7 @@
ARRAY_SIZE(cclk_lp_parents),
CLK_SET_RATE_PARENT,
clk_base + CCLKLP_BURST_POLICY,
- 0, 4, 8, 9, NULL);
+ TEGRA_DIVIDER_2, 4, 8, 9, NULL);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 90d9d25..80431f0 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -682,12 +682,12 @@
[tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
[tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
[tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
- [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
[tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
- [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@
[tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
[tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
[tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
- [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
[tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
[tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
[tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index aff86b5..166e02f 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -516,11 +516,11 @@
};
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
- {12000000, 216000000, 432, 12, 1, 8},
- {13000000, 216000000, 432, 13, 1, 8},
- {16800000, 216000000, 360, 14, 1, 8},
- {19200000, 216000000, 360, 16, 1, 8},
- {26000000, 216000000, 432, 26, 1, 8},
+ {12000000, 408000000, 408, 12, 0, 8},
+ {13000000, 408000000, 408, 13, 0, 8},
+ {16800000, 408000000, 340, 14, 0, 8},
+ {19200000, 408000000, 340, 16, 0, 8},
+ {26000000, 408000000, 408, 26, 0, 8},
{0, 0, 0, 0, 0, 0},
};
@@ -570,6 +570,15 @@
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
+static struct div_nmp plld_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 11,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
{12000000, 216000000, 864, 12, 4, 12},
{13000000, 216000000, 864, 13, 4, 12},
@@ -603,19 +612,18 @@
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
- .div_nmp = &pllp_nmp,
+ .div_nmp = &plld_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
- { 12000000, 148500000, 99, 1, 8},
- { 12000000, 594000000, 99, 1, 1},
- { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */
- { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */
- { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */
- { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */
+ { 12000000, 594000000, 99, 1, 2},
+ { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */
+ { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -753,21 +761,19 @@
[tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
[tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
- [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
[tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
- [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
- [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
[tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
[tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
- [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
[tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
[tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
- [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+ [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
[tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
[tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
@@ -794,7 +800,7 @@
[tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
[tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
[tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
- [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
[tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
[tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
[tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
@@ -1286,9 +1292,9 @@
clk_register_clkdev(clk, "pll_d2", NULL);
clks[TEGRA124_CLK_PLL_D2] = clk;
- /* PLLD2_OUT0 ?? */
+ /* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
- CLK_SET_RATE_PARENT, 1, 2);
+ CLK_SET_RATE_PARENT, 1, 1);
clk_register_clkdev(clk, "pll_d2_out0", NULL);
clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index dbace15..dace2b1 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -574,6 +574,8 @@
[tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
[tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 02821b0..a918bc4 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -54,7 +54,7 @@
static u64 pit_read_sched_clock(void)
{
- return __raw_readl(clksrc_base + PITCVAL);
+ return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)
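The one-character change above returns the bitwise complement of PITCVAL: the PIT current-value register counts down, while sched_clock() needs a monotonically increasing value, and for an N-bit down-counter the complement is exactly the elapsed up-count. A short stand-alone illustration of that identity (plain C, no kernel APIs):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 32-bit down-counter that has ticked 5 times from all-ones */
	uint32_t down = 0xffffffffu - 5;

	/* ~down recovers the elapsed ticks: complement == up-count */
	printf("elapsed ticks: %u\n", (uint32_t)~down);	/* prints 5 */
	return 0;
}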
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cb003a6..199b52b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1109,12 +1109,27 @@
goto err_set_policy_cpu;
}
+	/* related cpus should at least have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+	 * affected cpus must always be the ones that are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+ if (!frozen) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
+
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
pr_err("%s: ->get() failed\n", __func__);
@@ -1162,20 +1177,6 @@
}
}
- /* related cpus should atleast have policy->cpus */
- cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
- /*
- * affected cpus must always be the one, which are online. We aren't
- * managing offline cpus here.
- */
- cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
- if (!frozen) {
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
- }
-
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1206,6 +1207,7 @@
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
+ up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1546,23 +1548,16 @@
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
- if (cpufreq_disabled() || !cpufreq_driver)
- return -ENOENT;
+ if (policy) {
+ down_read(&policy->rwsem);
+ ret_freq = __cpufreq_get(cpu);
+ up_read(&policy->rwsem);
- BUG_ON(!policy);
-
- if (!down_read_trylock(&cpufreq_rwsem))
- return 0;
-
- down_read(&policy->rwsem);
-
- ret_freq = __cpufreq_get(cpu);
-
- up_read(&policy->rwsem);
- up_read(&cpufreq_rwsem);
+ cpufreq_cpu_put(policy);
+ }
return ret_freq;
}
@@ -2148,7 +2143,7 @@
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
new_policy.cur = cpufreq_driver->get(cpu);
if (!policy->cur) {
pr_debug("Driver did not initialize current freq");
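The cpufreq_get() rewrite above swaps the raw per-CPU lookup plus BUG_ON() for the usual get/lock/use/unlock/put discipline around the policy object. A stand-alone model of that discipline follows; policy_get()/policy_put() and the commented-out rwsem calls are simplified stand-ins, not the cpufreq core's API.

#include <stdio.h>

struct policy {
	int refcount;
	unsigned int cur_khz;
};

static struct policy *policy_get(struct policy *p)
{
	if (!p)
		return NULL;		/* object may already be gone */
	p->refcount++;
	return p;
}

static void policy_put(struct policy *p)
{
	p->refcount--;
}

/* mirrors the new cpufreq_get(): returns 0 when no policy exists */
static unsigned int get_freq(struct policy *p)
{
	unsigned int khz = 0;

	p = policy_get(p);
	if (p) {
		/* down_read(&p->rwsem) would go here in the real driver */
		khz = p->cur_khz;
		/* up_read(&p->rwsem) */
		policy_put(p);
	}
	return khz;
}

int main(void)
{
	struct policy p = { .refcount = 1, .cur_khz = 1200000 };

	printf("%u kHz\n", get_freq(&p));
	printf("%u kHz (missing policy)\n", get_freq(NULL));
	return 0;
}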
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index de4aa40..2c6d5e1 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -916,7 +916,7 @@
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+ old->workfn = fw_device_update;
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@
dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
+static void fw_device_workfn(struct work_struct *work)
+{
+ struct fw_device *device = container_of(to_delayed_work(work),
+ struct fw_device, work);
+ device->workfn(work);
+}
+
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
@@ -1252,7 +1259,8 @@
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
- INIT_DELAYED_WORK(&device->work, fw_device_init);
+ device->workfn = fw_device_init;
+ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
@@ -1268,7 +1276,7 @@
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+ device->workfn = fw_device_refresh;
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
@@ -1283,7 +1291,7 @@
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+ device->workfn = fw_device_update;
fw_schedule_device_work(device, 0);
}
break;
@@ -1308,7 +1316,7 @@
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
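The fw_device_workfn() hunk above is the generic replacement for PREPARE_DELAYED_WORK(): keep the current handler in a workfn field, register one fixed dispatcher with INIT_DELAYED_WORK(), and let the dispatcher recover the containing object and call workfn. The sbp2.c diff further down applies the same shape. A compilable stand-alone model follows; the work structure and the open-coded container_of() are simplified stand-ins for the workqueue API.

#include <stddef.h>
#include <stdio.h>

struct work { void (*fn)(struct work *w); };

struct device {
	int id;
	void (*workfn)(struct work *w);	/* current handler, may be swapped */
	struct work work;		/* fixed entry point for the queue */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* single dispatcher: recovers the device and calls whatever workfn holds */
static void device_workfn(struct work *w)
{
	struct device *dev = container_of(w, struct device, work);

	dev->workfn(w);
}

static void device_init(struct work *w)
{
	printf("init device %d\n", container_of(w, struct device, work)->id);
}

static void device_shutdown(struct work *w)
{
	printf("shutdown device %d\n", container_of(w, struct device, work)->id);
}

int main(void)
{
	struct device dev = { .id = 7, .workfn = device_init,
			      .work = { .fn = device_workfn } };

	dev.work.fn(&dev.work);		/* "queued" work runs init */
	dev.workfn = device_shutdown;	/* retarget without re-initializing */
	dev.work.fn(&dev.work);		/* next run executes shutdown */
	return 0;
}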
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 6b89598..4af0a7b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -929,8 +929,6 @@
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
- fwnet_transmit_packet_failed(ptask);
-
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@
errors_skipped = 0;
last_rcode = rcode;
- } else
+ } else {
errors_skipped++;
+ }
+ fwnet_transmit_packet_failed(ptask);
}
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6f74d8d..8db6632 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,7 +290,6 @@
#define QUIRK_NO_MSI 0x10
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -303,10 +302,7 @@
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
- QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
- {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
- QUIRK_PHY_LCTRL_TIMEOUT},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
- ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -2299,9 +2294,6 @@
* TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
* cannot actually use the phy at that time. These need tens of
	 * milliseconds pause between LPS write and first phy access too.
- *
- * But do not wait for 50msec on Agere/LSI cards. Their phy
- * arbitration state machine may time out during such a long wait.
*/
reg_write(ohci, OHCI1394_HCControlSet,
@@ -2309,11 +2301,8 @@
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+ for (lps = 0, i = 0; !lps && i < 3; i++) {
msleep(50);
-
- for (lps = 0, i = 0; !lps && i < 150; i++) {
- msleep(1);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029d..7aef911 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@
*/
int generation;
int retries;
+ work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
@@ -864,7 +865,7 @@
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+ lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
@@ -918,7 +919,7 @@
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
@@ -972,6 +973,13 @@
sbp2_conditionally_unblock(lu);
}
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+ struct sbp2_logical_unit, work);
+ lu->workfn(work);
+}
+
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
- INIT_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
+ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index acf3a36..32982da 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@
{
struct armada_private *priv = dev->dev_private;
- /*
- * Yes, we really must jump through these hoops just to store a
- * _pointer_ to something into the kfifo. This is utterly insane
- * and idiotic, because it kfifo requires the _data_ pointed to by
- * the pointer const, not the pointer itself. Not only that, but
- * you have to pass a pointer _to_ the pointer you want stored.
- */
- const struct drm_framebuffer *silly_api_alert = fb;
- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
schedule_work(&priv->fb_unref_work);
}
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index c8fcf12..5f8b0c2 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,6 +2,7 @@
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 5736aaa..f7af69b 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -468,8 +468,8 @@
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
legacy_dev_list) {
- drm_put_dev(dev);
list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
}
}
DRM_INFO("Module unloaded\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 215131a..c204b4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -172,20 +172,24 @@
ret = exynos_drm_subdrv_open(dev, file);
if (ret)
- goto out;
+ goto err_file_priv_free;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
ret = PTR_ERR(anon_filp);
- goto out;
+ goto err_subdrv_close;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
-out:
+
+err_subdrv_close:
+ exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
kfree(file_priv);
file->driver_priv = NULL;
return ret;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02..ec7bb0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -403,7 +403,7 @@
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pch;
+ struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- while (pch) {
- struct pci_dev *curr = pch;
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id;
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
- } else {
- goto check_next;
- }
- pci_dev_put(pch);
+ } else
+ continue;
+
break;
}
-check_next:
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
- pci_dev_put(curr);
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found?\n");
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40a2b36..d278be1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -842,7 +842,7 @@
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
- false);
+ true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1a24e84..28d24ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
- base = 0;
+ /*
+		 * One more attempt, but this time requesting the region from
+		 * base + 1, as we have seen that this resolves the region
+		 * conflict with the PCI bus.
+ * This is a BIOS w/a: Some BIOS wrap stolen in the root
+ * PCI bus, but have an off-by-one error. Hence retry the
+ * reservation starting from 1 instead of 0.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
}
return base;
@@ -201,6 +214,13 @@
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ return 0;
+ }
+#endif
+
if (dev_priv->gtt.stolen_size == 0)
return 0;
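The stolen-memory hunk above retries the reservation at base + 1 with size - 1 when the first request collides with a BIOS-created resource that is off by one. A stand-alone model of that retry; try_reserve() is an illustrative stub for devm_request_mem_region(), not the real call.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stub for devm_request_mem_region(): fails if the BIOS region overlaps */
static bool try_reserve(uint32_t start, uint32_t size, uint32_t bios_end)
{
	return size && start > bios_end;
}

static uint32_t reserve_stolen(uint32_t base, uint32_t size, uint32_t bios_end)
{
	if (try_reserve(base, size, bios_end))
		return base;
	/* BIOS w/a: retry one byte in, one byte smaller, as in the hunk */
	if (try_reserve(base + 1, size - 1, bios_end))
		return base;
	return 0;	/* both attempts conflicted: give up, as the driver does */
}

int main(void)
{
	/* BIOS root-bus resource ends exactly at base: off-by-one overlap */
	printf("base = 0x%x\n",
	       reserve_stolen(0x7c000000u, 32u << 20, 0x7c000000u));
	return 0;
}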
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9fec711..d554169 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -618,33 +618,25 @@
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
+ int reg;
- if (INTEL_INFO(dev)->gen < 7) {
- status = pipe == PIPE_A ?
- DE_PIPEA_VBLANK :
- DE_PIPEB_VBLANK;
+ if (INTEL_INFO(dev)->gen >= 8) {
+ status = GEN8_PIPE_VBLANK;
+ reg = GEN8_DE_PIPE_ISR(pipe);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ status = DE_PIPE_VBLANK_IVB(pipe);
+ reg = DEISR;
} else {
- switch (pipe) {
- default:
- case PIPE_A:
- status = DE_PIPEA_VBLANK_IVB;
- break;
- case PIPE_B:
- status = DE_PIPEB_VBLANK_IVB;
- break;
- case PIPE_C:
- status = DE_PIPEC_VBLANK_IVB;
- break;
- }
+ status = DE_PIPE_VBLANK(pipe);
+ reg = DEISR;
}
- return __raw_i915_read32(dev_priv, DEISR) & status;
+ return __raw_i915_read32(dev_priv, reg) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -702,7 +694,28 @@
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_DDI(dev)) {
+ /*
+ * On HSW HDMI outputs there seems to be a 2 line
+ * difference, whereas eDP has the normal 1 line
+ * difference that earlier platforms have. External
+ * DP is unknown. For now just check for the 2 line
+ * difference case on all output types on HSW+.
+ *
+ * This might misinterpret the scanline counter being
+ * one line too far along on eDP, but that's less
+ * dangerous than the alternative since that would lead
+ * the vblank timestamp code astray when it sees a
+ * scanline count before vblank_start during a vblank
+ * interrupt.
+ */
+ in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && (position == vbl_start - 2 ||
+ position == vbl_start - 1)) ||
+ (!in_vbl && (position == vbl_end - 2 ||
+ position == vbl_end - 1)))
+ position = (position + 2) % vtotal;
+ } else if (HAS_PCH_SPLIT(dev)) {
/*
* The scanline counter increments at the leading edge
* of hsync, ie. it completely misses the active portion
@@ -2769,10 +2782,9 @@
return;
if (HAS_PCH_IBX(dev)) {
- mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
- SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
} else {
- mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
@@ -2832,20 +2844,19 @@
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB);
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A |
- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
- extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
}
dev_priv->irq_mask = ~display_mask;
@@ -2961,9 +2972,9 @@
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e06b9e0..234ac5f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1244,6 +1244,7 @@
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4c16728..9b8a7c7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1092,12 +1092,12 @@
struct drm_device *dev = dev_priv->dev;
bool cur_state;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else
+ else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ else
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 57552eb..2688f6d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1249,17 +1249,24 @@
DRM_DEBUG_KMS("Turn eDP power off\n");
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
+ intel_dp->want_panel_vdd = false;
+
ironlake_wait_panel_off(intel_dp);
+
+ /* We got a reference when we enabled the VDD. */
+ intel_runtime_pm_put(dev_priv);
}
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1639,7 +1646,7 @@
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
- IS_BROADWELL(dev) ? 0 : link_entry_time |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
@@ -1784,6 +1791,7 @@
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d..ee3181e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -845,7 +845,7 @@
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (IS_G4X(dev))
+ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -899,8 +899,8 @@
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
- && HAS_PCH_SPLIT(dev)) {
+ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de35..079ea38 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -698,7 +698,7 @@
freq /= 0xff;
ctl = freq << 17;
- if (IS_GEN2(dev) && panel->backlight.combination_mode)
+ if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@
ctl = I915_READ(BLC_PWM_CTL);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81..e1fc35a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3493,6 +3493,8 @@
u32 pcbr;
int pctx_size = 24*1024;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- valleyview_setup_pctx(dev);
-
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -4395,6 +4395,8 @@
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_setup_pctx(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 89c484d..4ee702a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -866,13 +866,16 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
- return -EINVAL;
+ pm_runtime_forbid(dev);
+ return -EBUSY;
}
nv_debug_level(SILENT);
@@ -923,12 +926,15 @@
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
- if (nouveau_runtime_pm == 0)
+ if (nouveau_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2cec2ab..607dc14 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1314,7 +1314,7 @@
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6419ca..bbb1784 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3046,7 +3046,7 @@
}
/**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
*
* @rdev: radeon_device pointer
* @max_rb_num: max RBs (render backends) for the asic
@@ -4134,8 +4134,11 @@
{
if (enable)
WREG32(CP_MEC_CNTL, 0);
- else
+ else {
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
udelay(50);
}
@@ -7902,7 +7905,8 @@
/* init golden registers */
cik_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cik_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1ecb3f1..94626ea 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
}
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
@@ -291,6 +293,11 @@
u32 me_cntl, reg_offset;
int i;
+ if (enable == false) {
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ }
+
for (i = 0; i < 2; i++) {
if (i == 0)
reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@
if (!rdev->sdma_fw)
return -EINVAL;
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
/* halt the MEs */
cik_sdma_enable(rdev, false);
@@ -492,9 +495,6 @@
*/
void cik_sdma_fini(struct radeon_device *rdev)
{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
/* halt the MEs */
cik_sdma_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8a2c010..27b0ff1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5299,7 +5299,8 @@
/* init golden registers */
evergreen_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = evergreen_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8c..3a03ba3 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@
#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ea932ac..bf6300c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2105,7 +2105,8 @@
/* init golden registers */
ni_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cayman_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ef024ce..3cc78bb 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3942,8 +3942,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7c63ef8..0b658b3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,8 +1430,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3768aab..802b192 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,8 +325,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e209eb7..98d6053 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,8 +240,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cdbc417..647ef40 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2968,7 +2968,8 @@
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = r600_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b012cbb..044bc98 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1521,13 +1521,16 @@
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
+ } else {
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
}
radeon_restore_bios_scratch_regs(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 84a1bbb7..f633c27 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -403,11 +403,15 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (radeon_runtime_pm == 0)
- return -EINVAL;
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
- if (radeon_runtime_pm == -1 && !radeon_is_px())
- return -EINVAL;
+ if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -456,12 +460,15 @@
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_crtc *crtc;
- if (radeon_runtime_pm == 0)
+ if (radeon_runtime_pm == 0) {
+ pm_runtime_forbid(dev);
return -EBUSY;
+ }
/* are we PX enabled? */
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
DRM_DEBUG_DRIVER("failing to power off - not px\n");
+ pm_runtime_forbid(dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 2aecd6d..66ed3ea 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -130,7 +137,8 @@
"Error during ACPI methods call\n");
}
- if (radeon_runtime_pm != 0) {
+ if ((radeon_runtime_pm == 1) ||
+ ((radeon_runtime_pm == -1) && radeon_is_px())) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 77f5b0c..040a2a1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -714,6 +714,9 @@
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+ /* Change the size here instead of the init above so only lpfn is affected */
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
@@ -935,7 +938,7 @@
while (size) {
loff_t p = *pos / PAGE_SIZE;
unsigned off = *pos & ~PAGE_MASK;
- ssize_t cur_size = min(size, PAGE_SIZE - off);
+ size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
struct page *page;
void *ptr;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369..130d5cc 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,8 +474,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fdcde76..72d3616 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,8 +1048,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3595073..3462b64 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,8 +756,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 98e8138..237dd29 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,8 +586,6 @@
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4e37a42..fef3107 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1811,7 +1811,8 @@
/* init golden registers */
rv770_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = rv770_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 8357832..9a124d0 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6618,7 +6618,8 @@
/* init golden registers */
si_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = si_startup(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a066513..214b799 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 801231c..0ce48e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@
vma->vm_private_data = bo;
/*
- * PFNMAP is faster than MIXEDMAP due to reduced page
- * administration. So use MIXEDMAP only if private VMA, where
- * we need to support COW.
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
*/
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
@@ -359,7 +361,7 @@
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8d67b94..0394811 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -177,8 +177,10 @@
if (obj->vmapping)
udl_gem_vunmap(obj);
- if (gem_obj->import_attach)
+ if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, obj->sg);
+ put_device(gem_obj->dev->dev);
+ }
if (obj->pages)
udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@
int ret;
/* need to attach */
+ get_device(dev->dev);
attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
+ if (IS_ERR(attach)) {
+ put_device(dev->dev);
return ERR_CAST(attach);
+ }
get_dma_buf(dma_buf);
@@ -282,6 +287,6 @@
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
-
+ put_device(dev->dev);
return ERR_PTR(ret);
}
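The udl hunks above take a device reference before dma_buf_attach() and drop it on every path that can abandon the attachment: attach failure, the fail_detach unwind, and final GEM object teardown. A stand-alone model of keeping such a get/put pair balanced across an error path; get_ref(), put_ref() and attach() are illustrative stand-ins, not the drm/udl API.

#include <stdbool.h>
#include <stdio.h>

static int refs;

static void get_ref(void) { refs++; }
static void put_ref(void) { refs--; }

/* stand-in for dma_buf_attach(): may fail */
static bool attach(bool fail) { return !fail; }

static int import(bool fail_attach)
{
	get_ref();			/* taken before the attach attempt */
	if (!attach(fail_attach)) {
		put_ref();		/* failure path drops it immediately */
		return -1;
	}
	return 0;			/* success: released later at teardown */
}

static void teardown(void)
{
	put_ref();			/* pairs with the get in import() */
}

int main(void)
{
	import(true);			/* failed import: refs back to 0 */
	if (import(false) == 0)
		teardown();
	printf("refs = %d (expect 0)\n", refs);
	return 0;
}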
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 82468d9..e7af580 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@
if (unlikely(ret != 0))
goto out_unlock;
+ /*
+ * A gb-aware client referencing a shared surface will
+ * expect a backup buffer to be present.
+ */
+ if (dev_priv->has_mob && req->shareable) {
+ uint32_t backup_handle;
+
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index befe0e3..24883b4 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,6 +43,7 @@
#define G25_REV_MIN 0x22
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
+#define G27_2_REV_MIN 0x39
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
@@ -130,6 +131,7 @@
{DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
{G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
{G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
+ {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */
};
/* Recalculates X axis value accordingly to currently selected range */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 1235405..2f19b15 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -42,6 +42,7 @@
#define DUALSHOCK4_CONTROLLER_BT BIT(6)
#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
+#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
#define MAX_LEDS 4
@@ -499,6 +500,7 @@
__u8 right;
#endif
+ __u8 worker_initialized;
__u8 led_state[MAX_LEDS];
__u8 led_count;
};
@@ -993,22 +995,11 @@
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
}
-static void sony_destroy_ff(struct hid_device *hdev)
-{
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- cancel_work_sync(&sc->state_worker);
-}
-
#else
static int sony_init_ff(struct hid_device *hdev)
{
return 0;
}
-
-static void sony_destroy_ff(struct hid_device *hdev)
-{
-}
#endif
static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@@ -1077,6 +1068,8 @@
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
ret = sixaxis_set_operational_usb(hdev);
+
+ sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, sixaxis_state_worker);
}
else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@@ -1087,6 +1080,7 @@
if (ret < 0)
goto err_stop;
+ sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, dualshock4_state_worker);
} else {
ret = 0;
@@ -1101,9 +1095,11 @@
goto err_stop;
}
- ret = sony_init_ff(hdev);
- if (ret < 0)
- goto err_stop;
+ if (sc->quirks & SONY_FF_SUPPORT) {
+ ret = sony_init_ff(hdev);
+ if (ret < 0)
+ goto err_stop;
+ }
return 0;
err_stop:
@@ -1120,7 +1116,8 @@
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(hdev);
- sony_destroy_ff(hdev);
+ if (sc->worker_initialized)
+ cancel_work_sync(&sc->state_worker);
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index cb0137b..ab24ce2 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -320,13 +320,13 @@
hid_hw_close(hidraw->hid);
wake_up_interruptible(&hidraw->wait);
}
+ device_destroy(hidraw_class,
+ MKDEV(hidraw_major, hidraw->minor));
} else {
--hidraw->open;
}
if (!hidraw->open) {
if (!hidraw->exist) {
- device_destroy(hidraw_class,
- MKDEV(hidraw_major, hidraw->minor));
hidraw_table[hidraw->minor] = NULL;
kfree(hidraw);
} else {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f5ed031..de17c55 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -387,7 +387,7 @@
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
- depends on (CPM1 || CPM2) && OF_I2C
+ depends on CPM1 || CPM2
help
This supports the use of the I2C interface on Freescale
processors with CPM1 or CPM2.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index be7f0a2..f3b89a4 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -39,7 +39,9 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e81c554..f9c12e9 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -53,8 +53,8 @@
#include "user.h"
#define DRV_NAME MLX4_IB_DRV_NAME
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "April 4, 2008"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index aa03e73..bf90057 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,8 +46,8 @@
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d18d08a..8ee228e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -492,12 +492,11 @@
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
- init_waitqueue_head(&isert_conn->conn_wait);
- init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->conn_wait);
+ init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
- mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -688,11 +687,11 @@
pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
mutex_unlock(&isert_conn->conn_mutex);
goto wake_up;
}
@@ -712,7 +711,7 @@
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
- wake_up(&isert_conn->conn_wait);
+ complete(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}
@@ -888,16 +887,17 @@
* Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
*/
- mutex_lock(&isert_conn->conn_comp_mutex);
- if (coalesce &&
+ mutex_lock(&isert_conn->conn_mutex);
+ if (coalesce && isert_conn->state == ISER_CONN_UP &&
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+ tx_desc->llnode_active = true;
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
return;
}
isert_conn->conn_comp_batch = 0;
tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
send_wr->send_flags = IB_SEND_SIGNALED;
}
@@ -1464,7 +1464,7 @@
case ISCSI_OP_SCSI_CMD:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1476,7 +1476,7 @@
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1486,7 +1486,7 @@
case ISCSI_OP_TEXT:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
/*
@@ -1549,6 +1549,7 @@
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
cmd->write_data_done = wr->cur_rdma_length;
+ wr->send_wr_num = 0;
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1589,7 +1590,7 @@
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
* Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_free_conn()
+ * from isert_wait_conn()
*/
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1613,6 +1614,7 @@
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1624,7 +1626,7 @@
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
}
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1662,7 +1664,7 @@
case ISER_IB_RDMA_READ:
pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
@@ -1691,31 +1693,76 @@
}
static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+ struct llist_node *llnode;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
+
+ mutex_lock(&isert_conn->conn_mutex);
+ llnode = llist_del_all(&isert_conn->conn_comp_llist);
+ isert_conn->conn_comp_batch = 0;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
+
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
+ }
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ struct llist_node *llnode = tx_desc->comp_llnode_batch;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
- if (tx_desc) {
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
+ }
+ tx_desc->comp_llnode_batch = NULL;
+
+ if (!isert_cmd)
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+ else
+ isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
+
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct iscsi_conn *conn = isert_conn->conn;
+
+ if (isert_conn->post_recv_buf_count)
+ return;
+
+ isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
}
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- pr_debug("Calling wake_up from isert_cq_comp_err\n");
+ while (atomic_read(&isert_conn->post_send_buf_count))
+ msleep(3000);
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state != ISER_CONN_DOWN)
- isert_conn->state = ISER_CONN_TERMINATING;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
- wake_up(&isert_conn->conn_wait_comp_err);
- }
+ complete(&isert_conn->conn_wait_comp_err);
}
static void
@@ -1740,8 +1787,14 @@
pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
pr_debug("TX wc.status: 0x%08x\n", wc.status);
pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_comp_err(tx_desc, isert_conn);
+
+ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+ if (tx_desc->llnode_active)
+ continue;
+
+ atomic_dec(&isert_conn->post_send_buf_count);
+ isert_cq_tx_comp_err(tx_desc, isert_conn);
+ }
}
}
@@ -1784,7 +1837,7 @@
wc.vendor_err);
}
isert_conn->post_recv_buf_count--;
- isert_cq_comp_err(NULL, isert_conn);
+ isert_cq_rx_comp_err(isert_conn);
}
}
@@ -2202,6 +2255,7 @@
if (!fr_desc->valid) {
memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
wr = &inv_wr;
@@ -2212,6 +2266,7 @@
/* Prepare FASTREG WR */
memset(&fr_wr, 0, sizeof(fr_wr));
+ fr_wr.wr_id = ISER_FASTREG_LI_WRID;
fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start =
fr_desc->data_frpl->page_list[0] + page_off;
@@ -2377,12 +2432,12 @@
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr, true);
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
isert_cmd);
@@ -2410,12 +2465,12 @@
return rc;
}
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
@@ -2702,22 +2757,11 @@
kfree(isert_np);
}
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
- int ret;
-
- mutex_lock(&isert_conn->conn_mutex);
- ret = (isert_conn->state == state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_free_conn: Starting \n");
+ pr_debug("isert_wait_conn: Starting \n");
/*
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2727,38 +2771,29 @@
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
- pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
- if (isert_conn->state == ISER_CONN_UP) {
- pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
- isert_conn->state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- wait_event(isert_conn->conn_wait_comp_err,
- (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
- isert_put_conn(isert_conn);
- return;
- }
if (isert_conn->state == ISER_CONN_INIT) {
mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
return;
}
- pr_debug("isert_free_conn: wait_event conn_wait %d\n",
- isert_conn->state);
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+ wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
isert_put_conn(isert_conn);
}
@@ -2771,6 +2806,7 @@
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
+ .iscsit_wait_conn = isert_wait_conn,
.iscsit_free_conn = isert_free_conn,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 708a069..f6ae7f5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -6,6 +6,7 @@
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
+#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@
struct isert_cmd *isert_cmd;
struct llist_node *comp_llnode_batch;
struct llist_node comp_llnode;
+ bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -116,8 +118,8 @@
struct isert_device *conn_device;
struct work_struct conn_logout_work;
struct mutex conn_mutex;
- wait_queue_head_t conn_wait;
- wait_queue_head_t conn_wait_comp_err;
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
struct kref conn_kref;
struct list_head conn_fr_pool;
int conn_fr_pool_size;
@@ -126,7 +128,6 @@
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
- struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
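
The isert conversion above swaps open-coded wait queues plus state polling for one-shot completions. A rough userspace analogue of that pattern, built on POSIX threads rather than the kernel's completion API (the struct and function names below merely mirror it), is sketched here.

#include <pthread.h>
#include <stdio.h>

/* Userspace analogue of a kernel completion: a one-shot "done" latch. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)	/* the waiter never polls connection state */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion conn_wait;

static void *worker(void *arg)
{
	(void)arg;
	complete(&conn_wait);	/* fires exactly once, like conn_wait above */
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&conn_wait);
	pthread_create(&t, NULL, worker, NULL);
	wait_for_completion(&conn_wait);
	pthread_join(t, NULL);
	printf("teardown observed\n");
	return 0;
}

Build with -pthread; the waiter blocks until complete() is called, with no equivalent of the removed isert_check_state() re-checking loop.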
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index bb3b57b..5ef7fcf 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -76,8 +76,18 @@
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+ int val;
- return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+ mutex_lock(&kpad->gpio_lock);
+
+ if (kpad->dir[bank] & bit)
+ val = kpad->dat_out[bank];
+ else
+ val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+ mutex_unlock(&kpad->gpio_lock);
+
+ return !!(val & bit);
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 1f695f2..184c8f2 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -27,29 +27,32 @@
static void da9052_onkey_query(struct da9052_onkey *onkey)
{
- int key_stat;
+ int ret;
- key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
- if (key_stat < 0) {
+ ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+ if (ret < 0) {
dev_err(onkey->da9052->dev,
- "Failed to read onkey event %d\n", key_stat);
+ "Failed to read onkey event err=%d\n", ret);
} else {
/*
* Since interrupt for deassertion of ONKEY pin is not
* generated, onkey event state determines the onkey
* button state.
*/
- key_stat &= DA9052_EVENTB_ENONKEY;
- input_report_key(onkey->input, KEY_POWER, key_stat);
- input_sync(onkey->input);
- }
+ bool pressed = !(ret & DA9052_STATUSA_NONKEY);
- /*
- * Interrupt is generated only when the ONKEY pin is asserted.
- * Hence the deassertion of the pin is simulated through work queue.
- */
- if (key_stat)
- schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+ input_report_key(onkey->input, KEY_POWER, pressed);
+ input_sync(onkey->input);
+
+ /*
+ * Interrupt is generated only when the ONKEY pin
+ * is asserted. Hence the deassertion of the pin
+ * is simulated through work queue.
+ */
+ if (pressed)
+ schedule_delayed_work(&onkey->work,
+ msecs_to_jiffies(50));
+ }
}
static void da9052_onkey_work(struct work_struct *work)
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 87095e2..8af34ff 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -409,7 +409,6 @@
__clear_bit(REL_X, input->relbit);
__clear_bit(REL_Y, input->relbit);
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9..d8d49d1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@
* Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
+
+static const int *quirk_min_max;
+
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
+ if (quirk_min_max) {
+ priv->x_min = quirk_min_max[0];
+ priv->x_max = quirk_min_max[1];
+ priv->y_min = quirk_min_max[2];
+ priv->y_max = quirk_min_max[3];
+ return 0;
+ }
+
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@@ -1485,10 +1496,54 @@
{ }
};
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+ {
+ /* Lenovo ThinkPad Helix */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+ },
+ .driver_data = (int []){1024, 5052, 2258, 4832},
+ },
+ {
+ /* Lenovo ThinkPad X240 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
+#endif
+ { }
+};
+
void __init synaptics_module_init(void)
{
+ const struct dmi_system_id *min_max_dmi;
+
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+ min_max_dmi = dmi_first_match(min_max_dmi_table);
+ if (min_max_dmi)
+ quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
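
The synaptics change consults a board quirk table once at module init and, on the first match, overrides the reported coordinate ranges instead of querying the hardware. Below is a self-contained sketch of that first-match lookup; it uses a plain prefix comparison where the kernel uses DMI substring matching, and the table contents are copied from the hunk above purely as sample data.

#include <stdio.h>
#include <string.h>

/* Illustrative first-match quirk table; the real driver matches DMI
 * vendor/product strings and stores the board's coordinate ranges. */
struct quirk {
	const char *product;	/* prefix to match, e.g. "ThinkPad T440" */
	int min_max[4];		/* x_min, x_max, y_min, y_max */
};

static const struct quirk quirks[] = {
	{ "ThinkPad Helix", {1024, 5052, 2258, 4832} },
	{ "ThinkPad X240",  {1232, 5710, 1156, 4696} },
	{ "ThinkPad T440",  {1024, 5112, 2024, 4832} },
	{ NULL, {0} }		/* terminator, like the empty { } entry */
};

static const int *first_match(const char *product)
{
	const struct quirk *q;

	for (q = quirks; q->product; q++)
		if (strncmp(product, q->product, strlen(q->product)) == 0)
			return q->min_max;
	return NULL;		/* no quirk: fall back to the hardware query */
}

int main(void)
{
	const int *mm = first_match("ThinkPad T440s");	/* matched by prefix */

	if (mm)
		printf("x: %d..%d  y: %d..%d\n", mm[0], mm[1], mm[2], mm[3]);
	return 0;
}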
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index f046865..9816c51 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -16,16 +16,6 @@
This will increase the size of the kernelcapi module by 20 KB.
If unsure, say Y.
-config ISDN_CAPI_MIDDLEWARE
- bool "CAPI2.0 Middleware support"
- depends on TTY
- help
- This option will enhance the capabilities of the /dev/capi20
- interface. It will provide a means of moving a data connection,
- established via the usual /dev/capi20 interface to a special tty
- device. If you want to use pppd with pppdcapiplugin to dial up to
- your ISP, say Y here.
-
config ISDN_CAPI_CAPI20
tristate "CAPI2.0 /dev/capi support"
help
@@ -34,6 +24,16 @@
standardized libcapi20 to access this functionality. You should say
Y/M here.
+config ISDN_CAPI_MIDDLEWARE
+ bool "CAPI2.0 Middleware support"
+ depends on ISDN_CAPI_CAPI20 && TTY
+ help
+ This option will enhance the capabilities of the /dev/capi20
+ interface. It will provide a means of moving a data connection,
+ established via the usual /dev/capi20 interface to a special tty
+ device. If you want to use pppd with pppdcapiplugin to dial up to
+ your ISP, say Y here.
+
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
depends on ISDN_I4L
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9a06fe8..95ad936 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -254,16 +254,6 @@
---help---
Provides thin provisioning and snapshots that share a data store.
-config DM_DEBUG_BLOCK_STACK_TRACING
- boolean "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
- select STACKTRACE
- ---help---
- Enable this for messages that may help debug problems with the
- block manager locking used by thin provisioning and caching.
-
- If unsure, say N.
-
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 1e018e9..0e385e4 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -872,7 +872,7 @@
{
struct mq_policy *mq = to_mq_policy(p);
- kfree(mq->table);
+ vfree(mq->table);
epool_exit(&mq->cache_pool);
epool_exit(&mq->pre_cache_pool);
kfree(mq);
@@ -1245,7 +1245,7 @@
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
mq->hash_bits = ffs(mq->nr_buckets) - 1;
- mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
if (!mq->table)
goto bad_alloc_table;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af7014..074b9c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -979,12 +979,13 @@
int r;
struct dm_io_region o_region, c_region;
struct cache *cache = mg->cache;
+ sector_t cblock = from_cblock(mg->cblock);
o_region.bdev = cache->origin_dev->bdev;
o_region.count = cache->sectors_per_block;
c_region.bdev = cache->cache_dev->bdev;
- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.sector = cblock * cache->sectors_per_block;
c_region.count = cache->sectors_per_block;
if (mg->writeback || mg->demote) {
@@ -2464,20 +2465,18 @@
bool discarded_block;
struct dm_bio_prison_cell *cell;
struct policy_result lookup_result;
- struct per_bio_data *pb;
+ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
* the end of the origin device. We don't cache these.
* Just remap to the origin and carry on.
*/
- remap_to_origin_clear_discard(cache, bio, block);
+ remap_to_origin(cache, bio);
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio, pb_data_size);
-
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index afc3d01..d6e8817 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -546,6 +546,9 @@
r = insert_exceptions(ps, area, callback, callback_context,
&full);
+ if (!full)
+ memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
dm_bufio_release(bp);
dm_bufio_forget(client, chunk);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index baa87ff..fb9efc8 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -76,7 +76,7 @@
#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
-#define THIN_VERSION 1
+#define THIN_VERSION 2
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3
@@ -1755,3 +1755,38 @@
return r;
}
+
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct thin_disk_superblock *disk_super;
+
+ down_write(&pmd->root_lock);
+ pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+ r = superblock_lock(pmd, &sblock);
+ if (r) {
+ DMERR("couldn't read superblock");
+ goto out;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = cpu_to_le32(pmd->flags);
+
+ dm_bm_unlock(sblock);
+out:
+ up_write(&pmd->root_lock);
+ return r;
+}
+
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+{
+ bool needs_check;
+
+ down_read(&pmd->root_lock);
+ needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
+ up_read(&pmd->root_lock);
+
+ return needs_check;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 82ea384..e3c857d 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -25,6 +25,11 @@
/*----------------------------------------------------------------*/
+/*
+ * Thin metadata superblock flags.
+ */
+#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)
+
struct dm_pool_metadata;
struct dm_thin_device;
@@ -202,6 +207,12 @@
dm_sm_threshold_fn fn,
void *context);
+/*
+ * Updates the superblock immediately.
+ */
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
+
/*----------------------------------------------------------------*/
#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7e84bac..be70d38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -130,10 +130,11 @@
struct dm_thin_new_mapping;
/*
- * The pool runs in 3 modes. Ordered in degraded order for comparisons.
+ * The pool runs in 4 modes. Ordered in degraded order for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
+ PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
PM_READ_ONLY, /* metadata may not be changed */
PM_FAIL, /* all I/O fails */
};
@@ -198,7 +199,6 @@
};
static enum pool_mode get_pool_mode(struct pool *pool);
-static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
/*
@@ -226,6 +226,7 @@
struct pool *pool;
struct dm_thin_device *td;
+ bool requeue_mode:1;
};
/*----------------------------------------------------------------*/
@@ -369,14 +370,18 @@
struct dm_thin_new_mapping *overwrite_mapping;
};
-static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
struct bio *bio;
struct bio_list bios;
+ unsigned long flags;
bio_list_init(&bios);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
bio_list_merge(&bios, master);
bio_list_init(master);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -391,12 +396,26 @@
static void requeue_io(struct thin_c *tc)
{
struct pool *pool = tc->pool;
+
+ requeue_bio_list(tc, &pool->deferred_bios);
+ requeue_bio_list(tc, &pool->retry_on_resume_list);
+}
+
+static void error_retry_list(struct pool *pool)
+{
+ struct bio *bio;
unsigned long flags;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
spin_lock_irqsave(&pool->lock, flags);
- __requeue_bio_list(tc, &pool->deferred_bios);
- __requeue_bio_list(tc, &pool->retry_on_resume_list);
+ bio_list_merge(&bios, &pool->retry_on_resume_list);
+ bio_list_init(&pool->retry_on_resume_list);
spin_unlock_irqrestore(&pool->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
}
/*
@@ -925,13 +944,15 @@
}
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
dm_block_t free_blocks;
struct pool *pool = tc->pool;
- if (get_pool_mode(pool) != PM_WRITE)
+ if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
return -EINVAL;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
@@ -958,7 +979,7 @@
}
if (!free_blocks) {
- out_of_data_space(pool);
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
return -ENOSPC;
}
}
@@ -988,15 +1009,32 @@
spin_unlock_irqrestore(&pool->lock, flags);
}
+static bool should_error_unserviceable_bio(struct pool *pool)
+{
+ enum pool_mode m = get_pool_mode(pool);
+
+ switch (m) {
+ case PM_WRITE:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
+ return true;
+
+ case PM_OUT_OF_DATA_SPACE:
+ return pool->pf.error_if_no_space;
+
+ case PM_READ_ONLY:
+ case PM_FAIL:
+ return true;
+ default:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
+ return true;
+ }
+}
+
static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
- /*
- * When pool is read-only, no cell locking is needed because
- * nothing is changing.
- */
- WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
-
- if (pool->pf.error_if_no_space)
+ if (should_error_unserviceable_bio(pool))
bio_io_error(bio);
else
retry_on_resume(bio);
@@ -1007,11 +1045,20 @@
struct bio *bio;
struct bio_list bios;
+ if (should_error_unserviceable_bio(pool)) {
+ cell_error(pool, cell);
+ return;
+ }
+
bio_list_init(&bios);
cell_release(pool, cell, &bios);
- while ((bio = bio_list_pop(&bios)))
- handle_unserviceable_bio(pool, bio);
+ if (should_error_unserviceable_bio(pool))
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ else
+ while ((bio = bio_list_pop(&bios)))
+ retry_on_resume(bio);
}
static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1296,6 +1343,11 @@
}
}
+static void process_bio_success(struct thin_c *tc, struct bio *bio)
+{
+ bio_endio(bio, 0);
+}
+
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
bio_io_error(bio);
@@ -1328,6 +1380,11 @@
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ continue;
+ }
+
/*
* If we've got no free new_mapping structs, and processing
* this bio might require one, we pause until there are some
@@ -1394,51 +1451,134 @@
/*----------------------------------------------------------------*/
+struct noflush_work {
+ struct work_struct worker;
+ struct thin_c *tc;
+
+ atomic_t complete;
+ wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+ atomic_set(&w->complete, 1);
+ wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = true;
+ requeue_io(w->tc);
+ complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = false;
+ complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+ struct noflush_work w;
+
+ INIT_WORK(&w.worker, fn);
+ w.tc = tc;
+ atomic_set(&w.complete, 0);
+ init_waitqueue_head(&w.wait);
+
+ queue_work(tc->pool->wq, &w.worker);
+
+ wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
+
static enum pool_mode get_pool_mode(struct pool *pool)
{
return pool->pf.mode;
}
+static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+{
+ dm_table_event(pool->ti->table);
+ DMINFO("%s: switching pool to %s mode",
+ dm_device_name(pool->pool_md), new_mode);
+}
+
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
- int r;
- enum pool_mode old_mode = pool->pf.mode;
+ struct pool_c *pt = pool->ti->private;
+ bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
+ enum pool_mode old_mode = get_pool_mode(pool);
+
+ /*
+ * Never allow the pool to transition to PM_WRITE mode if user
+ * intervention is required to verify metadata and data consistency.
+ */
+ if (new_mode == PM_WRITE && needs_check) {
+ DMERR("%s: unable to switch pool to write mode until repaired.",
+ dm_device_name(pool->pool_md));
+ if (old_mode != new_mode)
+ new_mode = old_mode;
+ else
+ new_mode = PM_READ_ONLY;
+ }
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode.
+ */
+ if (old_mode == PM_FAIL)
+ new_mode = old_mode;
switch (new_mode) {
case PM_FAIL:
if (old_mode != new_mode)
- DMERR("%s: switching pool to failure mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
pool->process_prepared_discard = process_prepared_discard_fail;
+
+ error_retry_list(pool);
break;
case PM_READ_ONLY:
if (old_mode != new_mode)
- DMERR("%s: switching pool to read-only mode",
- dm_device_name(pool->pool_md));
- r = dm_pool_abort_metadata(pool->pmd);
- if (r) {
- DMERR("%s: aborting transaction failed",
- dm_device_name(pool->pool_md));
- new_mode = PM_FAIL;
- set_pool_mode(pool, new_mode);
- } else {
- dm_pool_metadata_read_only(pool->pmd);
- pool->process_bio = process_bio_read_only;
- pool->process_discard = process_discard;
- pool->process_prepared_mapping = process_prepared_mapping_fail;
- pool->process_prepared_discard = process_prepared_discard_passdown;
- }
+ notify_of_pool_mode_change(pool, "read-only");
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_bio_success;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
+
+ error_retry_list(pool);
+ break;
+
+ case PM_OUT_OF_DATA_SPACE:
+ /*
+ * Ideally we'd never hit this state; the low water mark
+ * would trigger userland to extend the pool before we
+ * completely run out of data space. However, many small
+ * IOs to unprovisioned space can consume data space at an
+ * alarming rate. Adjust your low water mark if you're
+ * frequently seeing this mode.
+ */
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool, "out-of-data-space");
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
break;
case PM_WRITE:
if (old_mode != new_mode)
- DMINFO("%s: switching pool to write mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "write");
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
@@ -1448,32 +1588,35 @@
}
pool->pf.mode = new_mode;
+ /*
+ * The pool mode may have changed, sync it so bind_control_target()
+ * doesn't cause an unexpected mode transition on resume.
+ */
+ pt->adjusted_pf.mode = new_mode;
}
-/*
- * Rather than calling set_pool_mode directly, use these which describe the
- * reason for mode degradation.
- */
-static void out_of_data_space(struct pool *pool)
+static void abort_transaction(struct pool *pool)
{
- DMERR_LIMIT("%s: no free data space available.",
- dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_READ_ONLY);
+ const char *dev_name = dm_device_name(pool->pool_md);
+
+ DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ if (dm_pool_abort_metadata(pool->pmd)) {
+ DMERR("%s: failed to abort metadata transaction", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
+
+ if (dm_pool_metadata_set_needs_check(pool->pmd)) {
+ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
}
static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{
- dm_block_t free_blocks;
-
DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
dm_device_name(pool->pool_md), op, r);
- if (r == -ENOSPC &&
- !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
- !free_blocks)
- DMERR_LIMIT("%s: no free metadata space available.",
- dm_device_name(pool->pool_md));
-
+ abort_transaction(pool);
set_pool_mode(pool, PM_READ_ONLY);
}
@@ -1524,6 +1667,11 @@
thin_hook_bio(tc, bio);
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ return DM_MAPIO_SUBMITTED;
+ }
+
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
@@ -1687,7 +1835,7 @@
/*
* We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
- enum pool_mode old_mode = pool->pf.mode;
+ enum pool_mode old_mode = get_pool_mode(pool);
enum pool_mode new_mode = pt->adjusted_pf.mode;
/*
@@ -1701,16 +1849,6 @@
pool->pf = pt->adjusted_pf;
pool->low_water_blocks = pt->low_water_blocks;
- /*
- * If we were in PM_FAIL mode, rollback of metadata failed. We're
- * not going to recover without a thin_repair. So we never let the
- * pool move out of the old mode. On the other hand a PM_READ_ONLY
- * may have been due to a lack of metadata or data space, and may
- * now work (ie. if the underlying devices have been resized).
- */
- if (old_mode == PM_FAIL)
- new_mode = old_mode;
-
set_pool_mode(pool, new_mode);
return 0;
@@ -2253,6 +2391,12 @@
return -EINVAL;
} else if (data_size > sb_data_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the data device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
if (sb_data_size)
DMINFO("%s: growing the data device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
@@ -2294,6 +2438,12 @@
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the metadata device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
warn_if_metadata_device_too_big(pool->md_dev);
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
@@ -2681,7 +2831,9 @@
else
DMEMIT("- ");
- if (pool->pf.mode == PM_READ_ONLY)
+ if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ DMEMIT("out_of_data_space ");
+ else if (pool->pf.mode == PM_READ_ONLY)
DMEMIT("ro ");
else
DMEMIT("rw ");
@@ -2795,7 +2947,7 @@
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2997,10 +3149,23 @@
return 0;
}
+static void thin_presuspend(struct dm_target *ti)
+{
+ struct thin_c *tc = ti->private;
+
+ if (dm_noflush_suspending(ti))
+ noflush_work(tc, do_noflush_start);
+}
+
static void thin_postsuspend(struct dm_target *ti)
{
- if (dm_noflush_suspending(ti))
- requeue_io((struct thin_c *)ti->private);
+ struct thin_c *tc = ti->private;
+
+ /*
+ * The dm_noflush_suspending flag has been cleared by now, so
+ * unfortunately we must always run this.
+ */
+ noflush_work(tc, do_noflush_stop);
}
/*
@@ -3085,12 +3250,13 @@
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
.map = thin_map,
.end_io = thin_endio,
+ .presuspend = thin_presuspend,
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 19b2687..0c2dec7 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -6,3 +6,13 @@
---help---
Library providing immutable on-disk data structure support for
device-mapper targets such as the thin provisioning target.
+
+config DM_DEBUG_BLOCK_STACK_TRACING
+ boolean "Keep stack trace of persistent data block lock holders"
+ depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
+ select STACKTRACE
+ ---help---
+ Enable this for messages that may help debug problems with the
+ block manager locking used by thin provisioning and caching.
+
+ If unsure, say N.
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index e9bdd46..786b689 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -91,6 +91,69 @@
dm_block_t block;
};
+struct bop_ring_buffer {
+ unsigned begin;
+ unsigned end;
+ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+};
+
+static void brb_init(struct bop_ring_buffer *brb)
+{
+ brb->begin = 0;
+ brb->end = 0;
+}
+
+static bool brb_empty(struct bop_ring_buffer *brb)
+{
+ return brb->begin == brb->end;
+}
+
+static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+{
+ unsigned r = old + 1;
+ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+}
+
+static int brb_push(struct bop_ring_buffer *brb,
+ enum block_op_type type, dm_block_t b)
+{
+ struct block_op *bop;
+ unsigned next = brb_next(brb, brb->end);
+
+ /*
+ * We don't allow the last bop to be filled, this way we can
+ * differentiate between full and empty.
+ */
+ if (next == brb->begin)
+ return -ENOMEM;
+
+ bop = brb->bops + brb->end;
+ bop->type = type;
+ bop->block = b;
+
+ brb->end = next;
+
+ return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+{
+ struct block_op *bop;
+
+ if (brb_empty(brb))
+ return -ENODATA;
+
+ bop = brb->bops + brb->begin;
+ result->type = bop->type;
+ result->block = bop->block;
+
+ brb->begin = brb_next(brb, brb->begin);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
struct sm_metadata {
struct dm_space_map sm;
@@ -101,25 +164,20 @@
unsigned recursion_count;
unsigned allocated_this_transaction;
- unsigned nr_uncommitted;
- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+ struct bop_ring_buffer uncommitted;
struct threshold threshold;
};
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
- struct block_op *op;
+ int r = brb_push(&smm->uncommitted, type, b);
- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+ if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
}
- op = smm->uncommitted + smm->nr_uncommitted++;
- op->type = type;
- op->block = b;
-
return 0;
}
@@ -158,11 +216,17 @@
return -ENOMEM;
}
- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
- while (smm->nr_uncommitted && !r) {
- smm->nr_uncommitted--;
- r = commit_bop(smm, smm->uncommitted +
- smm->nr_uncommitted);
+ if (smm->recursion_count == 1) {
+ while (!brb_empty(&smm->uncommitted)) {
+ struct block_op bop;
+
+ r = brb_pop(&smm->uncommitted, &bop);
+ if (r) {
+ DMERR("bug in bop ring buffer");
+ break;
+ }
+
+ r = commit_bop(smm, &bop);
if (r)
break;
}
@@ -217,7 +281,8 @@
static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
- int r, i;
+ int r;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
unsigned adjustment = 0;
@@ -225,8 +290,10 @@
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -254,7 +321,8 @@
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
dm_block_t b, int *result)
{
- int r, i, adjustment = 0;
+ int r, adjustment = 0;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
uint32_t rc;
@@ -262,8 +330,11 @@
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -671,7 +742,7 @@
smm->begin = superblock + 1;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -715,7 +786,7 @@
smm->begin = 0;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
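
The dm-space-map-metadata change replaces a counted array with a ring buffer that keeps one slot unused, so begin == end can only ever mean empty. A standalone sketch of that convention follows; the slot count and payload type are arbitrary choices for illustration, not the kernel's block_op layout.

#include <stdio.h>

#define NR_SLOTS 8	/* usable capacity is NR_SLOTS - 1: one slot stays free */

struct ring {
	unsigned begin, end;
	int slots[NR_SLOTS];
};

static unsigned ring_next(unsigned i)
{
	return (i + 1) % NR_SLOTS;
}

static int ring_empty(const struct ring *r)
{
	return r->begin == r->end;
}

/* Refuse to fill the last slot so begin == end always means "empty". */
static int ring_push(struct ring *r, int v)
{
	unsigned next = ring_next(r->end);

	if (next == r->begin)
		return -1;	/* full */
	r->slots[r->end] = v;
	r->end = next;
	return 0;
}

static int ring_pop(struct ring *r, int *v)
{
	if (ring_empty(r))
		return -1;
	*v = r->slots[r->begin];
	r->begin = ring_next(r->begin);
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, {0} };
	int v, i;

	for (i = 0; i < 10; i++)
		if (ring_push(&r, i))
			printf("push %d refused (full)\n", i);
	while (!ring_pop(&r, &v))
		printf("pop %d\n", v);
	return 0;
}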
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000..95c8944 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@
nid = cpu_to_node(cpu);
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6d20fbd..dcde5605 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -181,7 +181,7 @@
*/
static inline void __disable_port(struct port *port)
{
- bond_set_slave_inactive_flags(port->slave);
+ bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
@@ -193,7 +193,7 @@
struct slave *slave = port->slave;
if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
@@ -2062,6 +2062,7 @@
struct list_head *iter;
struct slave *slave;
struct port *port;
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
read_lock(&bond->lock);
rcu_read_lock();
@@ -2119,8 +2120,19 @@
}
re_arm:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
rcu_read_unlock();
read_unlock(&bond->lock);
+
+ if (should_notify_rtnl && rtnl_trylock()) {
+ bond_slave_state_notify(bond);
+ rtnl_unlock();
+ }
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c4747..e8f133e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -730,7 +730,7 @@
client_info->ntt = 0;
}
- if (!vlan_get_tag(skb, &client_info->vlan_id))
+ if (vlan_get_tag(skb, &client_info->vlan_id))
client_info->vlan_id = 0;
if (!client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1c6104d..e5628fc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -829,21 +829,25 @@
if (bond_is_lb(bond)) {
bond_alb_handle_active_change(bond, new_active);
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active)
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
bool should_notify_peers = false;
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (bond->params.fail_over_mac)
bond_do_fail_over_mac(bond, new_active,
@@ -1193,6 +1197,11 @@
return -EBUSY;
}
+ if (bond_dev == slave_dev) {
+ pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+ return -EPERM;
+ }
+
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
@@ -1463,14 +1472,15 @@
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave,
+ BOND_SLAVE_NOTIFY_NOW);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
* will activate the slaves in the selected
* aggregator
*/
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
@@ -1488,7 +1498,7 @@
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1654,9 +1664,6 @@
return -EINVAL;
}
- /* release the slave from its bond */
- bond->slave_cnt--;
-
bond_sysfs_slave_del(slave);
bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -1738,6 +1745,7 @@
unblock_netpoll_tx();
synchronize_rcu();
+ bond->slave_cnt--;
if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
@@ -2015,7 +2023,8 @@
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
bond->params.mode == BOND_MODE_8023AD)
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2562,7 +2571,8 @@
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
bond_set_slave_inactive_flags(
- bond->current_arp_slave);
+ bond->current_arp_slave,
+ BOND_SLAVE_NOTIFY_NOW);
bond->current_arp_slave = NULL;
}
@@ -2582,7 +2592,8 @@
slave->link_failure_count++;
slave->link = BOND_LINK_DOWN;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2615,17 +2626,17 @@
/*
* Send ARP probes for active-backup mode ARP monitor.
+ *
+ * Called with rcu_read_lock held.
*/
static bool bond_ab_arp_probe(struct bonding *bond)
{
struct slave *slave, *before = NULL, *new_slave = NULL,
- *curr_arp_slave, *curr_active_slave;
+ *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
+ *curr_active_slave = rcu_dereference(bond->curr_active_slave);
struct list_head *iter;
bool found = false;
-
- rcu_read_lock();
- curr_arp_slave = rcu_dereference(bond->current_arp_slave);
- curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
if (curr_arp_slave && curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
@@ -2634,32 +2645,23 @@
if (curr_active_slave) {
bond_arp_send_all(bond, curr_active_slave);
- rcu_read_unlock();
- return true;
+ return should_notify_rtnl;
}
- rcu_read_unlock();
/* if we don't have a curr_active_slave, search for the next available
* backup slave from the current_arp_slave and make it the candidate
* for becoming the curr_active_slave
*/
- if (!rtnl_trylock())
- return false;
- /* curr_arp_slave might have gone away */
- curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
-
if (!curr_arp_slave) {
- curr_arp_slave = bond_first_slave(bond);
- if (!curr_arp_slave) {
- rtnl_unlock();
- return true;
- }
+ curr_arp_slave = bond_first_slave_rcu(bond);
+ if (!curr_arp_slave)
+ return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(curr_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
before = slave;
@@ -2677,7 +2679,8 @@
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_LATER);
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
@@ -2689,26 +2692,31 @@
if (!new_slave && before)
new_slave = before;
- if (!new_slave) {
- rtnl_unlock();
- return true;
- }
+ if (!new_slave)
+ goto check_state;
new_slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(new_slave);
+ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
- rtnl_unlock();
- return true;
+check_state:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
+ return should_notify_rtnl;
}
static void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
- bool should_notify_peers = false, should_commit = false;
+ bool should_notify_peers = false;
+ bool should_notify_rtnl = false;
int delta_in_ticks;
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2717,11 +2725,12 @@
goto re_arm;
rcu_read_lock();
- should_notify_peers = bond_should_notify_peers(bond);
- should_commit = bond_ab_arp_inspect(bond);
- rcu_read_unlock();
- if (should_commit) {
+ should_notify_peers = bond_should_notify_peers(bond);
+
+ if (bond_ab_arp_inspect(bond)) {
+ rcu_read_unlock();
+
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
delta_in_ticks = 1;
@@ -2730,23 +2739,28 @@
}
bond_ab_arp_commit(bond);
+
rtnl_unlock();
+ rcu_read_lock();
}
- if (!bond_ab_arp_probe(bond)) {
- /* rtnl locking failed, re-arm */
- delta_in_ticks = 1;
- should_notify_peers = false;
- }
+ should_notify_rtnl = bond_ab_arp_probe(bond);
+ rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- if (should_notify_peers) {
+ if (should_notify_peers || should_notify_rtnl) {
if (!rtnl_trylock())
return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
+ if (should_notify_rtnl)
+ bond_slave_state_notify(bond);
+
rtnl_unlock();
}
}
@@ -3046,9 +3060,11 @@
bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
}
}
read_unlock(&bond->curr_slave_lock);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c378784..298c265 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -121,6 +121,7 @@
static struct bond_opt_value bond_lp_interval_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
};
static struct bond_option bond_opts[] = {
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 86ccfb9..2b0fdec 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -195,7 +195,8 @@
s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
- inactive:1; /* indicates inactive slave */
+ inactive:1, /* indicates inactive slave */
+ should_notify:1; /* indicates whether the state changed */
u8 duplex;
u32 original_mtu;
u32 link_failure_count;
@@ -303,6 +304,24 @@
}
}
+static inline void bond_set_slave_state(struct slave *slave,
+ int slave_state, bool notify)
+{
+ if (slave->backup == slave_state)
+ return;
+
+ slave->backup = slave_state;
+ if (notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ slave->should_notify = 0;
+ } else {
+ if (slave->should_notify)
+ slave->should_notify = 0;
+ else
+ slave->should_notify = 1;
+ }
+}
+
static inline void bond_slave_state_change(struct bonding *bond)
{
struct list_head *iter;
@@ -316,6 +335,19 @@
}
}
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+ tmp->should_notify = 0;
+ }
+ }
+}
+
static inline int bond_slave_state(struct slave *slave)
{
return slave->backup;
@@ -343,6 +375,9 @@
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_SLAVE_NOTIFY_NOW true
+#define BOND_SLAVE_NOTIFY_LATER false
+
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
{
@@ -394,17 +429,19 @@
}
#endif
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bool notify)
{
if (!bond_is_lb(slave->bond))
- bond_set_backup_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
}
-static inline void bond_set_slave_active_flags(struct slave *slave)
+static inline void bond_set_slave_active_flags(struct slave *slave,
+ bool notify)
{
- bond_set_active_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
}
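
The bond_set_slave_state() helper above either sends the RTM_NEWLINK message immediately or toggles should_notify so the message can be sent later under RTNL by bond_slave_state_notify(). A minimal user-space sketch of that now-vs-later toggle (all names below are invented for the illustration; only the toggle logic mirrors the patch):

	/* Illustrative analogue of the deferred-notification toggle in
	 * bond_set_slave_state(): two deferred changes that cancel each other
	 * leave nothing pending. Names are made up for this sketch. */
	#include <stdbool.h>
	#include <stdio.h>

	struct slave_sim {
		int  backup;		/* current state */
		bool should_notify;	/* deferred-notification flag */
	};

	static void set_state(struct slave_sim *s, int state, bool notify_now)
	{
		if (s->backup == state)
			return;

		s->backup = state;
		if (notify_now) {
			printf("notify: state=%d\n", state);	/* rtmsg_ifinfo() in the driver */
			s->should_notify = false;
		} else {
			/* toggle: a deferred change back to the old state cancels */
			s->should_notify = !s->should_notify;
		}
	}

	int main(void)
	{
		struct slave_sim s = { .backup = 0, .should_notify = false };

		set_state(&s, 1, false);	/* defer  -> pending */
		set_state(&s, 0, false);	/* revert -> pending cleared */
		printf("pending=%d\n", s.should_notify);	/* prints pending=0 */
		return 0;
	}
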
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 320bef2..61376ab 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,8 @@
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
+#define FLEXCAN_TIMEOUT_US (50)
+
/*
* FLEXCAN hardware feature flags
*
@@ -262,6 +264,22 @@
}
#endif
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
u32 reg_esr)
{
@@ -269,26 +287,95 @@
(reg_esr & FLEXCAN_ESR_ERR_BUS);
}
-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+static int flexcan_chip_enable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
- udelay(10);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
}
-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+static int flexcan_chip_disable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_freeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(100, 200);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg &= ~FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+ flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ return -ETIMEDOUT;
+
+ return 0;
}
static int flexcan_get_berr_counter(const struct net_device *dev,
@@ -709,19 +796,14 @@
u32 reg_mcr, reg_ctrl;
/* enable module */
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
/* soft reset */
- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- udelay(10);
-
- reg_mcr = flexcan_read(&regs->mcr);
- if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
- netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n",
- reg_mcr);
- err = -ENODEV;
- goto out;
- }
+ err = flexcan_chip_softreset(priv);
+ if (err)
+ goto out_chip_disable;
flexcan_set_bittiming(dev);
@@ -788,16 +870,14 @@
if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
flexcan_write(0x0, &regs->rxfgmask);
- if (priv->reg_xceiver) {
- err = regulator_enable(priv->reg_xceiver);
- if (err)
- goto out;
- }
+ err = flexcan_transceiver_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* synchronize with the can bus */
- reg_mcr = flexcan_read(&regs->mcr);
- reg_mcr &= ~FLEXCAN_MCR_HALT;
- flexcan_write(reg_mcr, &regs->mcr);
+ err = flexcan_chip_unfreeze(priv);
+ if (err)
+ goto out_transceiver_disable;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -810,7 +890,9 @@
return 0;
- out:
+ out_transceiver_disable:
+ flexcan_transceiver_disable(priv);
+ out_chip_disable:
flexcan_chip_disable(priv);
return err;
}
@@ -825,18 +907,17 @@
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg;
+
+ /* freeze + disable module */
+ flexcan_chip_freeze(priv);
+ flexcan_chip_disable(priv);
/* Disable all interrupts */
flexcan_write(0, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
- /* Disable + halt module */
- reg = flexcan_read(&regs->mcr);
- reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
-
- if (priv->reg_xceiver)
- regulator_disable(priv->reg_xceiver);
+ flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
return;
@@ -866,7 +947,7 @@
/* start chip and queuing */
err = flexcan_chip_start(dev);
if (err)
- goto out_close;
+ goto out_free_irq;
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -875,6 +956,8 @@
return 0;
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_close:
close_candev(dev);
out_disable_per:
@@ -945,12 +1028,16 @@
goto out_disable_ipg;
/* select "bus clock", chip must be disabled */
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ goto out_disable_per;
reg = flexcan_read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
flexcan_write(reg, &regs->ctrl);
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* set freeze, halt and activate FIFO, restrict register access */
reg = flexcan_read(&regs->mcr);
@@ -967,14 +1054,15 @@
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
- goto out_disable_per;
+ goto out_chip_disable;
}
err = register_candev(dev);
- out_disable_per:
/* disable core and turn off clocks */
+ out_chip_disable:
flexcan_chip_disable(priv);
+ out_disable_per:
clk_disable_unprepare(priv->clk_per);
out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
@@ -1104,9 +1192,10 @@
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
unregister_flexcandev(dev);
-
+ netif_napi_del(&priv->napi);
free_candev(dev);
return 0;
@@ -1117,8 +1206,11 @@
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
if (netif_running(dev)) {
netif_stop_queue(dev);
@@ -1139,9 +1231,7 @@
netif_device_attach(dev);
netif_start_queue(dev);
}
- flexcan_chip_enable(priv);
-
- return 0;
+ return flexcan_chip_enable(priv);
}
#endif /* CONFIG_PM_SLEEP */
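
The flexcan helpers above replace fixed udelay() waits with bounded polling of the MCR acknowledge bits, returning -ETIMEDOUT if the hardware never acks. A small user-space sketch of that poll-until-ack-or-timeout idiom (the register read is simulated; in the driver the loop sleeps with usleep_range() between polls):

	/* Sketch of the poll-with-timeout idiom used by flexcan_chip_enable()
	 * and friends. The "register" is a plain variable here; the driver
	 * reads the real MCR between polls. */
	#include <errno.h>
	#include <stdio.h>

	#define ACK_BIT 0x1u

	static unsigned int fake_mcr;			/* stands in for regs->mcr */
	static unsigned int read_mcr(void) { return fake_mcr; }

	static int wait_for_ack_clear(unsigned int timeout)
	{
		while (timeout-- && (read_mcr() & ACK_BIT))
			;				/* driver: usleep_range(10, 20) */

		if (read_mcr() & ACK_BIT)
			return -ETIMEDOUT;		/* hardware never acked */
		return 0;
	}

	int main(void)
	{
		fake_mcr = 0;				/* pretend the chip acked at once */
		printf("%d\n", wait_for_ack_clear(5));	/* 0 */

		fake_mcr = ACK_BIT;			/* pretend it never acks */
		printf("%d\n", wait_for_ack_clear(5));	/* -ETIMEDOUT (-110 on Linux) */
		return 0;
	}
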
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 2e45f6e..380d249 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1248,19 +1248,13 @@
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used for descriptors.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA config, aborting\n");
- goto out_pci_disable;
- }
+ dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+ goto out_pci_disable;
}
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d5c2d3e..422aab2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2436,7 +2436,7 @@
err_register:
err_sw_init:
err_eeprom:
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
@@ -2474,7 +2474,7 @@
unregister_netdev(netdev);
atl1e_free_ring_resources(adapter);
atl1e_force_ps(&adapter->hw);
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
pci_release_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 1f7b5aa..8a7bf7d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1484,6 +1484,10 @@
add_timer(&bp->timer);
b44_enable_ints(bp);
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_start(bp->phydev);
+
netif_start_queue(dev);
out:
return err;
@@ -1646,6 +1650,9 @@
netif_stop_queue(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_stop(bp->phydev);
+
napi_disable(&bp->napi);
del_timer_sync(&bp->timer);
@@ -2222,7 +2229,12 @@
}
if (status_changed) {
- b44_check_phy(bp);
+ u32 val = br32(bp, B44_TX_CTRL);
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
phy_print_status(phydev);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index cda25ac..6c9e1c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2507,6 +2507,7 @@
bp->fw_wr_seq++;
msg_data |= bp->fw_wr_seq;
+ bp->fw_last_msg = msg_data;
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
@@ -4000,8 +4001,23 @@
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+ u32 val;
+
+ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ return;
+ }
+ /* Tell firmware not to power down the PHY yet, otherwise
+ * the chip will take a long time to respond to MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+ val | BNX2_PORT_FEATURE_ASF_ENABLED);
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+ }
}
@@ -4033,9 +4049,22 @@
if (bp->wol)
pci_set_power_state(bp->pdev, PCI_D3hot);
- } else {
- pci_set_power_state(bp->pdev, PCI_D3hot);
+ break;
+
}
+ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ u32 val;
+
+ /* Tell firmware not to power down the PHY yet,
+ * otherwise the other port may not respond to
+ * MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ val &= ~BNX2_CONDITION_PM_STATE_MASK;
+ val |= BNX2_CONDITION_PM_STATE_UNPREP;
+ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+ }
+ pci_set_power_state(bp->pdev, PCI_D3hot);
/* No more memory access after this point until
* device is brought back to D0.
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f1cf2c4..e341bc3 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6900,6 +6900,7 @@
u16 fw_wr_seq;
u16 fw_drv_pulse_wr_seq;
+ u32 fw_last_msg;
int rx_max_ring;
int rx_ring_size;
@@ -7406,6 +7407,10 @@
#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 66c0df7..dbcff50 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3875,7 +3875,9 @@
xmit_type);
}
- /* Add the macs to the parsing BD this is a vf */
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
if (IS_VF(bp)) {
/* override GRE parameters in BD */
bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
@@ -3887,6 +3889,11 @@
&pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
+ } else if (bp->flags & TX_SWITCHING) {
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
}
SET_FLAG(pbd_e2_parsing_data,
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index fcf9105a5..09f3fef 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@
while (retry < 3) {
rc = 0;
rcu_read_lock();
- ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+ ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
- dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+ dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
@@ -785,7 +785,7 @@
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@
if (!use_pg_tbl)
return 0;
- dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
- ~(BNX2_PAGE_SIZE - 1);
+ dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+ ~(CNIC_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@
if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
int i, k, arr_size;
- cp->ctx_blk_size = BNX2_PAGE_SIZE;
- cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+ cp->ctx_blk_size = CNIC_PAGE_SIZE;
+ cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@
if (udev->l2_ring)
return 0;
- udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+ udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@
uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -1113,7 +1113,7 @@
uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
- pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
- PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+ CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
return -ENOMEM;
- n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+ n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
@@ -1296,7 +1296,7 @@
goto error;
}
- pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
@@ -1466,8 +1466,8 @@
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
- hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+ hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@
}
ctx->cid = cid;
- pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
@@ -1760,7 +1760,7 @@
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
- req2->rq_page_table_addr_lo & PAGE_MASK;
+ req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@
/* CSTORM and USTORM initialization is different, CSTORM requires
* CQ DB base & not PTE addr */
ictx->cstorm_st_context.cq_db_base.lo =
- req1->cq_page_table_addr_lo & PAGE_MASK;
+ req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
u32 cmd;
int comp = 0;
@@ -3244,7 +3244,8 @@
int rc;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+ lockdep_is_held(&cnic_lock));
if (ulp_ops && ulp_ops->cnic_get_stats)
rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
else
@@ -4384,7 +4385,7 @@
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
- memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+ memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+ rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@@ -4709,10 +4710,10 @@
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
- if (BNX2_PAGE_BITS > 12)
+ if (CNIC_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
- val |= (BNX2_PAGE_BITS - 8) << 4;
+ val |= (CNIC_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
@@ -4742,13 +4743,13 @@
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
- memset(txbd, 0, BNX2_PAGE_SIZE);
+ memset(txbd, 0, CNIC_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
- BNX2_PAGE_SIZE);
+ CNIC_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
@@ -5265,8 +5266,8 @@
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
- rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
- memset(rx_ring, 0, BNX2_PAGE_SIZE);
+ rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+ memset(rx_ring, 0, CNIC_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0d6b13f..d535ae4 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 95a8e4b..dcbca69 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8cf6b192..5f4d557 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.19"
-#define CNIC_MODULE_RELDATE "December 19, 2013"
+#define CNIC_MODULE_VERSION "2.5.20"
+#define CNIC_MODULE_RELDATE "March 14, 2014"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -24,6 +24,16 @@
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS 14
+#else
+#define CNIC_PAGE_BITS PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
+
struct kwqe {
u32 kwqe_op_flag;
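
The CNIC_PAGE_* macros above decouple the cnic ring layout from the CPU page size, capping it at 16K. A quick user-space check of the arithmetic, with PAGE_SHIFT pinned to 12 and the kernel's ALIGN() re-implemented locally just for this demo:

	/* User-space check of the CNIC page-size math. PAGE_SHIFT and ALIGN()
	 * below are stand-ins for the kernel definitions. */
	#include <stdio.h>

	#define PAGE_SHIFT 12				/* assumption for the demo */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	#if (PAGE_SHIFT > 14)
	#define CNIC_PAGE_BITS 14
	#else
	#define CNIC_PAGE_BITS PAGE_SHIFT
	#endif
	#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
	#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)

	int main(void)
	{
		unsigned long size = 10000;

		printf("page=%d aligned=%lu pages=%lu\n",
		       CNIC_PAGE_SIZE, CNIC_PAGE_ALIGN(size),
		       CNIC_PAGE_ALIGN(size) / CNIC_PAGE_SIZE);
		/* prints: page=4096 aligned=12288 pages=3 */
		return 0;
	}
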
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3167ed6..70a225c8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6843,8 +6843,7 @@
work_mask |= opaque_key;
- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ if (desc->err_vlan & RXD_ERR_MASK) {
drop_it:
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
@@ -17650,8 +17649,6 @@
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@@ -17683,7 +17680,8 @@
features |= NETIF_F_TSO_ECN;
}
- dev->features |= features;
+ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;
/*
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ef47238..04321e5 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2608,7 +2608,11 @@
#define RXD_ERR_TOO_SMALL 0x00400000
#define RXD_ERR_NO_RESOURCES 0x00800000
#define RXD_ERR_HUGE_FRAME 0x01000000
-#define RXD_ERR_MASK 0xffff0000
+
+#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
+ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
+ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
+ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
u32 reserved;
u32 opaque;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 1803c39..354ae979 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1704,7 +1704,7 @@
while (!bfa_raw_sem_get(bar)) {
if (--n <= 0)
return BFA_STATUS_BADFLASH;
- udelay(10000);
+ mdelay(10);
}
return BFA_STATUS_OK;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cf64f3d..4ad1187 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -707,7 +707,8 @@
else
skb_checksum_none_assert(skb);
- if (flags & BNA_CQ_EF_VLAN)
+ if ((flags & BNA_CQ_EF_VLAN) &&
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
@@ -2094,7 +2095,9 @@
rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
}
- rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rx_config->vlan_strip_status =
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}
static void
@@ -3245,11 +3248,6 @@
BNA_RXMODE_ALLMULTI;
bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- if (bnad->cfg_flags & BNAD_CF_PROMISC)
- bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
- else
- bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
-
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3374,6 +3372,27 @@
return 0;
}
+static int bnad_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnad *bnad = netdev_priv(dev);
+ netdev_features_t changed = features ^ dev->features;
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
@@ -3421,6 +3440,7 @@
.ndo_change_mtu = bnad_change_mtu,
.ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+ .ndo_set_features = bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bnad_netpoll
#endif
@@ -3433,14 +3453,14 @@
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3190d38e..d0c38e0 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -632,11 +632,16 @@
"Unable to allocate sk_buff\n");
break;
}
- bp->rx_skbuff[entry] = skb;
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
bp->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ bp->rx_skbuff[entry] = skb;
if (entry == RX_RING_SIZE - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -725,7 +730,7 @@
skb_put(skb, len);
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
dma_unmap_single(&bp->pdev->dev, addr,
- len, DMA_FROM_DEVICE);
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
skb_checksum_none_assert(skb);
@@ -1036,11 +1041,15 @@
}
entry = macb_tx_ring_wrap(bp->tx_head);
- bp->tx_head++;
netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto unlock;
+ }
+ bp->tx_head++;
tx_skb = &bp->tx_skb[entry];
tx_skb->skb = skb;
tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@
if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
netif_stop_queue(dev);
+unlock:
spin_unlock_irqrestore(&bp->lock, flags);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 43ab35f..34e2488 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6179,6 +6179,7 @@
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
+ .shutdown = remove_one,
.err_handler = &cxgb4_eeh,
};
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d09615..05529e2 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -350,11 +350,13 @@
u32 roce_drops_crc;
};
+/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
+#define BE_RESET_VLAN_TAG_ID 0xFFFF
+
struct be_vf_cfg {
unsigned char mac_addr[ETH_ALEN];
int if_handle;
int pmac_id;
- u16 def_vid;
u16 vlan_tag;
u32 tx_rate;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 04ac9c6..36c8061 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -913,24 +913,14 @@
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
-static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
- struct sk_buff *skb,
- bool *skip_hw_vlan)
+static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
{
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
unsigned int eth_hdr_len;
struct iphdr *ip;
- /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
- * may cause a transmit stall on that port. So the work-around is to
- * pad short packets (<= 32 bytes) to a 36-byte length.
- */
- if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
- if (skb_padto(skb, 36))
- goto tx_drop;
- skb->len = 36;
- }
-
/* For padded packets, BE HW modifies tot_len field in IP header
* incorrectly when VLAN tag is inserted by HW.
* For padded packets, Lancer computes incorrect checksum.
@@ -959,7 +949,7 @@
vlan_tx_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
/* HW may lockup when VLAN HW tagging is requested on
@@ -981,15 +971,39 @@
be_vlan_tag_tx_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
return skb;
tx_drop:
dev_kfree_skb_any(skb);
+err:
return NULL;
}
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
+{
+ /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
+ * less may cause a transmit stall on that port. So the work-around is
+ * to pad short packets (<= 32 bytes) to a 36-byte length.
+ */
+ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+ if (skb_padto(skb, 36))
+ return NULL;
+ skb->len = 36;
+ }
+
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+ if (!skb)
+ return NULL;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1157,6 +1171,14 @@
return status;
}
+static void be_clear_promisc(struct be_adapter *adapter)
+{
+ adapter->promiscuous = false;
+ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+}
+
static void be_set_rx_mode(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1170,9 +1192,7 @@
/* BE was previously in promiscuous mode; disable it */
if (adapter->promiscuous) {
- adapter->promiscuous = false;
- be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
-
+ be_clear_promisc(adapter);
if (adapter->vlans_added)
be_vid_config(adapter);
}
@@ -1287,24 +1307,20 @@
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan) {
- /* If this is new value, program it. Else skip. */
- vf_cfg->vlan_tag = vlan;
+ if (vf_cfg->vlan_tag != vlan)
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
vf_cfg->if_handle, 0);
- }
} else {
/* Reset Transparent Vlan Tagging. */
- vf_cfg->vlan_tag = 0;
- vlan = vf_cfg->def_vid;
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
+ vf + 1, vf_cfg->if_handle, 0);
}
-
- if (status)
+ if (!status)
+ vf_cfg->vlan_tag = vlan;
+ else
dev_info(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed\n", vlan, vf);
+ "VLAN %d config on VF %d failed\n", vlan, vf);
return status;
}
@@ -3013,11 +3029,11 @@
static int be_vf_setup(struct be_adapter *adapter)
{
- struct be_vf_cfg *vf_cfg;
- u16 def_vlan, lnk_speed;
- int status, old_vfs, vf;
struct device *dev = &adapter->pdev->dev;
+ struct be_vf_cfg *vf_cfg;
+ int status, old_vfs, vf;
u32 privileges;
+ u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3084,12 +3100,6 @@
if (!status)
vf_cfg->tx_rate = lnk_speed;
- status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle, NULL);
- if (status)
- goto err;
- vf_cfg->def_vid = def_vlan;
-
if (!old_vfs)
be_cmd_enable_vf(adapter, vf + 1);
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 903362a..03a3513 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -389,12 +389,6 @@
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
- /* Send it on its way. Tell FEC it's ready, interrupt when done,
- * it's the last BD of the frame, and to put the CRC on the end.
- */
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
- bdp->cbd_sc = status;
if (fep->bufdesc_ex) {
@@ -416,6 +410,13 @@
}
}
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
bdp_pre = fec_enet_get_prevdesc(bdp, fep);
if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
@@ -527,13 +528,6 @@
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Setup multicast filter. */
- set_multicast_list(ndev);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
@@ -654,6 +648,13 @@
writel(rcntl, fep->hwp + FEC_R_CNTRL);
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4be9715..1fc8334 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -522,10 +522,21 @@
return rc;
}
+static u64 ibmveth_encode_mac_addr(u8 *mac)
+{
+ int i;
+ u64 encoded = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ encoded = (encoded << 8) | mac[i];
+
+ return encoded;
+}
+
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- u64 mac_address = 0;
+ u64 mac_address;
int rxq_entries = 1;
unsigned long lpar_rc;
int rc;
@@ -579,8 +590,7 @@
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
- mac_address = mac_address >> 16;
+ mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -1183,8 +1193,8 @@
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
- unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
+ u64 mcast_addr;
+ mcast_addr = ibmveth_encode_mac_addr(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1372,9 +1382,6 @@
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
-
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1383,7 +1390,7 @@
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
- memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
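
ibmveth_encode_mac_addr() above packs the six MAC bytes MSB-first into the low 48 bits of a u64 regardless of host byte order, replacing the memcpy-and-shift that depended on the in-memory layout. A user-space check of the packing (the example address is arbitrary):

	/* User-space check of the MAC packing used by ibmveth_encode_mac_addr():
	 * the six bytes land in the low 48 bits, most significant byte first,
	 * independent of host endianness. */
	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	static uint64_t encode_mac_addr(const uint8_t *mac)
	{
		uint64_t encoded = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++)
			encoded = (encoded << 8) | mac[i];

		return encoded;
	}

	int main(void)
	{
		const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

		printf("0x%012llx\n", (unsigned long long)encode_mac_addr(mac));
		/* prints 0x020000aabbcc */
		return 0;
	}
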
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 451ba79..1f37499 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -138,7 +138,6 @@
struct napi_struct napi;
struct net_device_stats stats;
unsigned int mcastFilterSize;
- unsigned long mac_addr;
void * buffer_list_addr;
void * filter_list_addr;
dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f..8d76fca 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -161,7 +163,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
-#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
+#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
@@ -710,35 +712,6 @@
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
-
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
- u32 val;
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PSC_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -2756,12 +2729,15 @@
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ else
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
- mvneta_gmac_rgmii_set(pp, 1);
+ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+ val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
@@ -2774,6 +2750,7 @@
static int mvneta_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram_target_info;
+ struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
u32 phy_addr;
@@ -2838,9 +2815,15 @@
clk_prepare_enable(pp->clk);
- pp->base = of_iomap(dn, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto err_clk;
+ }
+
+ pp->base = devm_ioremap_resource(&pdev->dev, res);
if (pp->base == NULL) {
- err = -ENOMEM;
+ err = PTR_ERR(pp->base);
goto err_clk;
}
@@ -2848,7 +2831,7 @@
pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
- goto err_unmap;
+ goto err_clk;
}
for_each_possible_cpu(cpu) {
@@ -2913,8 +2896,6 @@
mvneta_deinit(pp);
err_free_stats:
free_percpu(pp->stats);
-err_unmap:
- iounmap(pp->base);
err_clk:
clk_disable_unprepare(pp->clk);
err_free_irq:
@@ -2934,7 +2915,6 @@
mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
free_percpu(pp->stats);
- iounmap(pp->base);
irq_dispose_mapping(dev->irq);
free_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fad4531..84a96f7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -742,6 +742,14 @@
err = mlx4_en_uc_steer_add(priv, new_mac,
&qpn,
&entry->reg_id);
+ if (err)
+ return err;
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+ err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
+ &priv->tunnel_reg_id);
return err;
}
}
@@ -1792,6 +1800,8 @@
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
+ if (mclist->tunnel_reg_id)
+ mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
}
mlx4_en_clear_list(dev);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 91b69ff..7e2995e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -129,13 +129,14 @@
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support",
+ [3] = "Device managed flow steering support",
[4] = "Automatic MAC reassignment support",
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
[8] = "Dynamic QP updates support",
- [9] = "TCP/IP offloads/flow-steering for VXLAN support"
+ [9] = "Device managed flow steering IPoIB support",
+ [10] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -859,7 +860,7 @@
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
/* For guests, disable vxlan tunneling */
- MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
field &= 0xf7;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
@@ -869,7 +870,7 @@
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
/* For guests, disable mw type 2 */
- MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
@@ -883,7 +884,7 @@
}
/* turn off ipoib managed steering for guests */
- MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
field &= ~0x80;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d711158..d413e60 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -150,6 +150,8 @@
struct pci_dev *pdev;
};
+static atomic_t pf_loading = ATOMIC_INIT(0);
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -749,7 +751,7 @@
has_eth_port = true;
}
- if (has_ib_port)
+ if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
request_module_nowait(IB_DRV_NAME);
if (has_eth_port)
request_module_nowait(EN_DRV_NAME);
@@ -1407,6 +1409,11 @@
u32 slave_read;
u32 cmd_channel_ver;
+ if (atomic_read(&pf_loading)) {
+ mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
mlx4_warn(dev, "Sending reset\n");
@@ -2319,7 +2326,11 @@
if (num_vfs) {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
+
+ atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, num_vfs);
+ atomic_dec(&pf_loading);
+
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
err);
@@ -2670,7 +2681,11 @@
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- int ret = __mlx4_init_one(pdev, 0);
+ const struct pci_device_id *id;
+ int ret;
+
+ id = pci_match_id(mlx4_pci_table, pdev);
+ ret = __mlx4_init_one(pdev, id->driver_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
@@ -2684,6 +2699,7 @@
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
+ .shutdown = mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
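
The pf_loading counter added above lets a VF that probes while the PF is still inside pci_enable_sriov() back off with -EPROBE_DEFER instead of timing out on the command channel. A user-space sketch of that guard, with C11 atomics standing in for the kernel's atomic_t (517 is EPROBE_DEFER's value in the kernel's errno numbering):

	/* Sketch of the pf_loading guard: the PF holds the counter while
	 * enabling SR-IOV, and a VF probing in that window defers so the
	 * driver core retries later. */
	#include <stdatomic.h>
	#include <stdio.h>

	#define EPROBE_DEFER 517

	static atomic_int pf_loading;

	static int vf_probe(void)
	{
		if (atomic_load(&pf_loading))
			return -EPROBE_DEFER;	/* PF not ready yet, defer the probe */
		return 0;
	}

	int main(void)
	{
		atomic_fetch_add(&pf_loading, 1);	/* PF enters pci_enable_sriov() */
		printf("%d\n", vf_probe());		/* -517 */
		atomic_fetch_sub(&pf_loading, 1);	/* PF done */
		printf("%d\n", vf_probe());		/* 0 */
		return 0;
	}
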
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6b65f77..7aec6c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -51,8 +51,8 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "1.1"
-#define DRV_RELDATE "Dec, 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb, 2014"
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9ca223b..b57e8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -57,8 +57,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "Dec 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a064f06..23b7e2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -46,8 +46,8 @@
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a..e0c92e0 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -83,6 +84,7 @@
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@
struct spi_transfer spi_xfer2[2];
struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
};
static int msg_enable;
@@ -1414,6 +1417,21 @@
ks->spidev = spi;
ks->tx_space = 6144;
+ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ if (ret == -EPROBE_DEFER)
+ goto err_reg;
+ } else {
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator enable fail: %d\n",
+ ret);
+ goto err_reg_en;
+ }
+ }
+
+
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1508,8 +1526,14 @@
err_netdev:
free_irq(ndev->irq, ks);
-err_id:
err_irq:
+err_id:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_disable(ks->vdd_reg);
+err_reg_en:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_put(ks->vdd_reg);
+err_reg:
free_netdev(ndev);
return ret;
}
@@ -1523,6 +1547,10 @@
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
+ if (!IS_ERR(priv->vdd_reg)) {
+ regulator_disable(priv->vdd_reg);
+ regulator_put(priv->vdd_reg);
+ }
free_netdev(priv->netdev);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4146664..27c4f13 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -340,6 +340,7 @@
if (qlcnic_sriov_vf_check(adapter))
return -EINVAL;
num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 77f1bce..7d4f549 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -807,7 +807,7 @@
!type->tc_param_valid)
return;
- if (tc < 0 || (tc > QLC_DCB_MAX_TC))
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
return;
tc_cfg = &type->tc_cfg[tc];
@@ -843,7 +843,7 @@
!type->tc_param_valid)
return;
- if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
return;
pgcfg = &type->pg_cfg[pgid];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ba78c74..1222865 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -816,9 +816,10 @@
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (err)
return err;
}
}
@@ -3863,7 +3864,7 @@
strcpy(buf, "Tx");
}
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 09acf15..e5277a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -13,8 +13,6 @@
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
#define QLC_MAC_OPCODE_MASK 0x7
-#define QLC_MAC_STAR_ADD 6
-#define QLC_MAC_STAR_DEL 7
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
@@ -1206,13 +1204,6 @@
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
- if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
- ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
- netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
- vf->pci_func);
- return -EINVAL;
- }
-
if (!(cmd->req.arg[1] & BIT_8))
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfdd..656c65d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4765,7 +4765,9 @@
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
- ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91a67ae..3ff7bc3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -209,7 +209,7 @@
[RTL_GIGA_MAC_VER_16] =
_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_18] =
_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_19] =
@@ -7118,6 +7118,8 @@
}
mutex_init(&tp->wk.mutex);
+ u64_stats_init(&tp->rx_stats.syncp);
+ u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
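
The two u64_stats_init() calls added above matter on 32-bit SMP kernels, where the syncp is a real seqcount that must be initialised before the first update. A minimal sketch of the writer/reader pattern, with hypothetical field names:

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;	/* u64_stats_init() before first use */
};

static void example_count_packet(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);	/* writer side */
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void example_read_stats(struct example_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {	/* the reader retries if it raced with an update */
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
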
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index eb75fbd..d7a3682 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1668,6 +1668,13 @@
struct efx_ptp_data *ptp = efx->ptp_data;
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+ if (!ptp) {
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Received PTP event but PTP not set up\n");
+ return;
+ }
+
if (!ptp->enabled)
return;
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 72d282b..c553f6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -151,7 +151,7 @@
sizeof(struct dma_desc)));
}
-const struct stmmac_chain_mode_ops chain_mode_ops = {
+const struct stmmac_mode_ops chain_mode_ops = {
.init = stmmac_init_dma_chain,
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7834a39..74610f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -419,20 +419,13 @@
unsigned int data; /* MII Data */
};
-struct stmmac_ring_mode_ops {
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
- void (*refill_desc3) (void *priv, struct dma_desc *p);
- void (*init_desc3) (struct dma_desc *p);
- void (*clean_desc3) (void *priv, struct dma_desc *p);
- int (*set_16kib_bfsize) (int mtu);
-};
-
-struct stmmac_chain_mode_ops {
+struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*set_16kib_bfsize)(int mtu);
+ void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
void (*clean_desc3) (void *priv, struct dma_desc *p);
};
@@ -441,8 +434,7 @@
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
- const struct stmmac_ring_mode_ops *ring;
- const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
@@ -460,7 +452,7 @@
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_ring_mode_ops ring_mode_ops;
-extern const struct stmmac_chain_mode_ops chain_mode_ops;
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index a96c7c2..650a4be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -100,10 +100,9 @@
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- if (unlikely(priv->plat->has_gmac))
- /* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
/* In ring mode we need to fill the desc3 because it is used as buffer */
@@ -126,7 +125,7 @@
return ret;
}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_mode_ops ring_mode_ops = {
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a2e7d2c..8543e1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -92,8 +92,8 @@
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define DMA_BUFFER_SIZE BUF_SIZE_4KiB
-static int buf_sz = DMA_BUFFER_SIZE;
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -136,8 +136,8 @@
dma_rxsize = DMA_RX_SIZE;
if (unlikely(dma_txsize < 0))
dma_txsize = DMA_TX_SIZE;
- if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
if (unlikely(flow_ctrl > 1))
flow_ctrl = FLOW_AUTO;
else if (likely(flow_ctrl < 0))
@@ -286,10 +286,25 @@
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
- /* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
- goto out;
+ int tx_lpi_timer = priv->tx_lpi_timer;
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1)) {
+ /* To manage at run-time if the EEE cannot be supported
+ /* Handle the case where EEE can no longer be supported
+ * at run time (for example because the link partner's
+ * capabilities have changed).
+ * In that case the driver disables its own timers.
+ */
+ if (priv->eee_active) {
+ pr_debug("stmmac: disable EEE\n");
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
+ tx_lpi_timer);
+ }
+ priv->eee_active = 0;
+ goto out;
+ }
+ /* Activate the EEE and start timers */
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -300,13 +315,13 @@
priv->hw->mac->set_eee_timer(priv->ioaddr,
STMMAC_DEFAULT_LIT_LS,
- priv->tx_lpi_timer);
+ tx_lpi_timer);
} else
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->ioaddr,
priv->phydev->link);
- pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
ret = true;
}
@@ -886,10 +901,10 @@
ret = BUF_SIZE_8KiB;
else if (mtu >= BUF_SIZE_2KiB)
ret = BUF_SIZE_4KiB;
- else if (mtu >= DMA_BUFFER_SIZE)
+ else if (mtu > DEFAULT_BUFSIZE)
ret = BUF_SIZE_2KiB;
else
- ret = DMA_BUFFER_SIZE;
+ ret = DEFAULT_BUFSIZE;
return ret;
}
@@ -951,9 +966,9 @@
p->des2 = priv->rx_skbuff_dma[i];
- if ((priv->mode == STMMAC_RING_MODE) &&
+ if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
- priv->hw->ring->init_desc3(p);
+ priv->hw->mode->init_desc3(p);
return 0;
}
@@ -984,11 +999,8 @@
unsigned int bfsize = 0;
int ret = -ENOMEM;
- /* Set the max buffer size according to the DESC mode
- * and the MTU. Note that RING mode allows 16KiB bsize.
- */
- if (priv->mode == STMMAC_RING_MODE)
- bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+ if (priv->hw->mode->set_16kib_bfsize)
+ bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1029,15 +1041,15 @@
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc) {
- priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
- rxsize, 1);
- priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
- txsize, 1);
+ priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
} else {
- priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
- rxsize, 0);
- priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
- txsize, 0);
+ priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
}
}
@@ -1288,7 +1300,7 @@
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = 0;
}
- priv->hw->ring->clean_desc3(priv, p);
+ priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
@@ -1705,7 +1717,7 @@
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
pr_err("%s: DMA descriptors allocation failed\n", __func__);
goto dma_desc_error;
@@ -1844,6 +1856,7 @@
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int enh_desc = priv->plat->enh_desc;
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -1871,27 +1884,19 @@
first = desc;
/* To program the descriptors according to the size of the frame */
- if (priv->mode == STMMAC_RING_MODE) {
- is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->ring->jumbo_frm(priv, skb,
- csum_insertion);
- } else {
- is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->chain->jumbo_frm(priv, skb,
- csum_insertion);
- }
+ if (enh_desc)
+ is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
- } else
+ } else {
desc = first;
+ entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ }
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2029,7 +2034,7 @@
p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->ring->refill_desc3(priv, p);
+ priv->hw->mode->refill_desc3(priv, p);
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
@@ -2633,11 +2638,11 @@
/* To use the chained or ring mode */
if (chain_mode) {
- priv->hw->chain = &chain_mode_ops;
+ priv->hw->mode = &chain_mode_ops;
pr_info(" Chain mode enabled\n");
priv->mode = STMMAC_CHAIN_MODE;
} else {
- priv->hw->ring = &ring_mode_ops;
+ priv->hw->mode = &ring_mode_ops;
pr_info(" Ring mode enabled\n");
priv->mode = STMMAC_RING_MODE;
}
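
The stmmac changes above collapse the separate ring/chain ops tables into one stmmac_mode_ops pointer, so callers no longer branch on priv->mode and simply skip hooks a mode does not provide. A minimal, generic sketch of that dispatch pattern (all names hypothetical):

#include <linux/types.h>

/* One ops table per descriptor mode; hooks a mode lacks stay NULL. */
struct example_mode_ops {
	void (*init)(void *desc, unsigned int size);	/* chain mode only */
	int  (*set_bufsize)(int mtu);			/* ring mode only */
	void (*refill_desc)(void *priv, void *desc);	/* both modes */
};

static int example_setup(const struct example_mode_ops *mode,
			 void *desc, unsigned int size, int mtu)
{
	int bufsize = 1536;		/* mirrors the new DEFAULT_BUFSIZE idea */

	if (mode->set_bufsize)		/* optional hook */
		bufsize = mode->set_bufsize(mtu);
	if (mode->init)			/* optional hook */
		mode->init(desc, size);
	return bufsize;
}
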
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index c61bc72b..8fb32a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -36,7 +36,7 @@
#ifdef CONFIG_DWMAC_STI
{ .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
{ .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
- { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 651087b..7d6d8ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1164,11 +1164,17 @@
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
+ u32 slave_port;
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
if (!slave->phy)
return;
phy_stop(slave->phy);
phy_disconnect(slave->phy);
slave->phy = NULL;
+ cpsw_ale_control_set(priv->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -2223,10 +2229,6 @@
goto clean_ale_ret;
}
- if (cpts_register(&pdev->dev, priv->cpts,
- data->cpts_clock_mult, data->cpts_clock_shift))
- dev_err(priv->dev, "error registering cpts device\n");
-
cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
&ss_res->start, ndev->irq);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 364d0c7..88ef270 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -355,7 +355,7 @@
int i;
spin_lock_irqsave(&ctlr->lock, flags);
- if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ if (ctlr->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
@@ -891,7 +891,7 @@
unsigned timeout;
spin_lock_irqsave(&chan->lock, flags);
- if (chan->state != CPDMA_STATE_ACTIVE) {
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index cd9b164..8f0e69c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,9 +1532,9 @@
struct device *emac_dev = &ndev->dev;
u32 cnt;
struct resource *res;
- int ret;
+ int q, m, ret;
+ int res_num = 0, irq_num = 0;
int i = 0;
- int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@
}
/* Request IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+ res_num))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+ dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+ if (request_irq(irq_num, emac_irq, 0, ndev->name,
+ ndev)) {
+ dev_err(emac_dev,
+ "DaVinci EMAC: request_irq() failed\n");
+ ret = -EBUSY;
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
- for (i = res->start; i <= res->end; i++) {
- if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
- 0, ndev->name, ndev))
goto rollback;
+ }
}
- k++;
+ res_num++;
}
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@
return 0;
-rollback:
-
- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
- ret = -EBUSY;
err:
+ emac_int_disable(priv);
+ napi_disable(&priv->napi);
+
+rollback:
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
+
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
+ cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
return ret;
}
@@ -1659,6 +1680,9 @@
*/
static int emac_dev_stop(struct net_device *ndev)
{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
@@ -1674,6 +1698,13 @@
if (priv->phydev)
phy_disconnect(priv->phydev);
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
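
The davinci_emac rework above walks every IORESOURCE_IRQ resource, requests each IRQ in it, and on failure frees exactly the IRQs already requested. A simplified sketch of that request/rollback shape (the helper and its names are illustrative, not the driver's):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_request_all_irqs(struct platform_device *pdev, void *ctx,
				    irq_handler_t handler, const char *name)
{
	struct resource *res;
	int res_num = 0, irq;

	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_num))) {
		for (irq = res->start; irq <= res->end; irq++) {
			if (request_irq(irq, handler, 0, name, ctx))
				goto rollback;
		}
		res_num++;
	}
	return 0;

rollback:
	/* Free the IRQs of the partially handled resource first... */
	while (irq-- > res->start)
		free_irq(irq, ctx);
	/* ...then every IRQ of the resources that were fully requested. */
	while (--res_num >= 0) {
		res = platform_get_resource(pdev, IORESOURCE_IRQ, res_num);
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, ctx);
	}
	return -EBUSY;
}
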
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ef312bc..6ac20a6 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -923,7 +923,7 @@
if (rc) {
dev_err(&pdev->dev,
"32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out;
+ goto err_out_pci_disable;
}
/* sanity check */
@@ -931,7 +931,7 @@
(pci_resource_len(pdev, 1) < io_size)) {
rc = -EIO;
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out;
+ goto err_out_pci_disable;
}
pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out;
+ goto err_out_pci_disable;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1084,6 +1084,8 @@
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
+err_out_pci_disable:
+ pci_disable_device(pdev);
err_out:
return rc;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7141a19..d6fce97 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -442,6 +442,8 @@
if (!net)
return -ENOMEM;
+ netif_carrier_off(net);
+
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
@@ -473,6 +475,8 @@
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
free_netdev(net);
+ } else {
+ schedule_delayed_work(&net_device_ctx->dwork, 0);
}
return ret;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1084e5d..b54fd25 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -243,6 +243,22 @@
return ret;
}
+static void rndis_set_link_state(struct rndis_device *rdev,
+ struct rndis_request *request)
+{
+ u32 link_status;
+ struct rndis_query_complete *query_complete;
+
+ query_complete = &request->response_msg.msg.query_complete;
+
+ if (query_complete->status == RNDIS_STATUS_SUCCESS &&
+ query_complete->info_buflen == sizeof(u32)) {
+ memcpy(&link_status, (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset), sizeof(u32));
+ rdev->link_state = link_status != 0;
+ }
+}
+
static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_message *resp)
{
@@ -272,6 +288,10 @@
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp,
resp->msg_len);
+ if (request->request_msg.ndis_msg_type ==
+ RNDIS_MSG_QUERY && request->request_msg.msg.
+ query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
+ rndis_set_link_state(dev, request);
} else {
netdev_err(ndev,
"rndis response buffer overflow "
@@ -620,7 +640,6 @@
ret = rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
- dev->link_state = (link_status != 0) ? true : false;
return ret;
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ab31544..a30258a 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -546,12 +546,12 @@
int rc;
unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->irq_busy) {
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
return -EBUSY;
}
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
might_sleep();
@@ -725,10 +725,11 @@
static irqreturn_t at86rf230_isr(int irq, void *data)
{
struct at86rf230_local *lp = data;
+ unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
lp->irq_busy = 1;
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
schedule_work(&lp->irqwork);
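
The at86rf230 fix above matters because lp->lock is taken both from the hard-IRQ handler and from process context: the process-context side has to disable interrupts while holding it, or the ISR can spin on the lock forever on the same CPU. A minimal generic sketch of the pattern (names hypothetical):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_dev {
	spinlock_t lock;		/* shared by ISR and process context */
	int irq_busy;
	struct work_struct irqwork;
};

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *ed = data;
	unsigned long flags;

	spin_lock_irqsave(&ed->lock, flags);
	ed->irq_busy = 1;
	spin_unlock_irqrestore(&ed->lock, flags);

	schedule_work(&ed->irqwork);
	return IRQ_HANDLED;
}

static int example_try_xfer(struct example_dev *ed)
{
	unsigned long flags;
	int busy;

	/* Process context: irqsave keeps the ISR from deadlocking on us. */
	spin_lock_irqsave(&ed->lock, flags);
	busy = ed->irq_busy;
	spin_unlock_irqrestore(&ed->lock, flags);

	return busy ? -EBUSY : 0;
}
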
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index c14d39b..d7b2e94 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -180,7 +180,8 @@
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
- dev->vlan_features |= IFB_FEATURES;
+ dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a5d2189..1831fb7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -506,6 +506,9 @@
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
+#define ALWAYS_ON_FEATURES \
+ (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -539,7 +542,7 @@
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= ALWAYS_ON_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
@@ -699,7 +702,7 @@
features = netdev_increment_features(vlan->lowerdev->features,
features,
mask);
- features |= NETIF_F_LLTX;
+ features |= ALWAYS_ON_FEATURES;
return features;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19c9eca..76d96b9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -164,9 +164,9 @@
* of that setting. Returns the index of the last setting if
* none of the others match.
*/
-static inline int phy_find_setting(int speed, int duplex)
+static inline unsigned int phy_find_setting(int speed, int duplex)
{
- int idx = 0;
+ unsigned int idx = 0;
while (idx < ARRAY_SIZE(settings) &&
(settings[idx].speed != speed || settings[idx].duplex != duplex))
@@ -185,7 +185,7 @@
* the mask in features. Returns the index of the last setting
* if nothing else matches.
*/
-static inline int phy_find_valid(int idx, u32 features)
+static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
idx++;
@@ -204,7 +204,7 @@
static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
- int idx;
+ unsigned int idx;
/* Sanitize settings based on PHY capabilities */
if ((features & SUPPORTED_Autoneg) == 0)
@@ -954,7 +954,8 @@
(phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
- int idx, status;
+ int status;
+ unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 82514e7..2f6989b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -683,10 +683,9 @@
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
- struct ethtool_wolinfo wol;
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
/* If the device has WOL enabled, we cannot suspend the PHY */
- wol.cmd = ETHTOOL_GWOL;
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts)
return -EBUSY;
@@ -916,6 +915,8 @@
int err;
int lpa;
int lpagb = 0;
+ int common_adv;
+ int common_adv_gb = 0;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
@@ -937,7 +938,7 @@
phydev->lp_advertising =
mii_stat1000_to_ethtool_lpa_t(lpagb);
- lpagb &= adv << 2;
+ common_adv_gb = lpagb & adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
@@ -950,25 +951,25 @@
if (adv < 0)
return adv;
- lpa &= adv;
+ common_adv = lpa & adv;
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
phydev->pause = 0;
phydev->asym_pause = 0;
- if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+ if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
phydev->speed = SPEED_1000;
- if (lpagb & LPA_1000FULL)
+ if (common_adv_gb & LPA_1000FULL)
phydev->duplex = DUPLEX_FULL;
- } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ } else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
- if (lpa & LPA_100FULL)
+ if (common_adv & LPA_100FULL)
phydev->duplex = DUPLEX_FULL;
} else
- if (lpa & LPA_10FULL)
+ if (common_adv & LPA_10FULL)
phydev->duplex = DUPLEX_FULL;
if (phydev->duplex == DUPLEX_FULL) {
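
The phy_device.c change above keeps the raw LPA registers intact for reporting (phydev->lp_advertising) and resolves speed/duplex from the intersection of local and link-partner advertisements instead. A compact restatement of that resolution order as a standalone helper (illustrative only):

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/types.h>

/* Resolve speed/duplex from the bits both ends advertise. */
static void example_resolve_aneg(u32 common_adv_gb, u32 common_adv,
				 int *speed, int *duplex)
{
	*speed = SPEED_10;
	*duplex = DUPLEX_HALF;

	if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
		*speed = SPEED_1000;
		if (common_adv_gb & LPA_1000FULL)
			*duplex = DUPLEX_FULL;
	} else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
		*speed = SPEED_100;
		if (common_adv & LPA_100FULL)
			*duplex = DUPLEX_FULL;
	} else if (common_adv & LPA_10FULL) {
		*duplex = DUPLEX_FULL;
	}
}
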
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8fe9cb7..26f8635 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1686,7 +1686,9 @@
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false);
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 433f0a0..e2797f1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,7 +11,7 @@
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
+obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 955df81..054e59c 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,20 +1029,12 @@
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
- if (usb_device_no_sg_constraint(dev->udev))
- dev->can_dma_sg = 1;
-
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
- if (dev->can_dma_sg) {
- dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
- dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- }
-
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
@@ -1395,6 +1387,19 @@
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info dlink_dub1312_info = {
+ .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct driver_info sitecom_info = {
.description = "Sitecom USB 3.0 to Gigabit Adapter",
.bind = ax88179_bind,
@@ -1421,6 +1426,19 @@
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info lenovo_info = {
+ .description = "Lenovo OneLinkDock Gigabit LAN",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
@@ -1431,6 +1449,10 @@
USB_DEVICE(0x0b95, 0x178a),
.driver_info = (unsigned long)&ax88178a_info,
}, {
+ /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
+ USB_DEVICE(0x2001, 0x4a00),
+ .driver_info = (unsigned long)&dlink_dub1312_info,
+}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
USB_DEVICE(0x0df6, 0x0072),
.driver_info = (unsigned long)&sitecom_info,
@@ -1438,6 +1460,10 @@
/* Samsung USB Ethernet Adapter */
USB_DEVICE(0x04e8, 0xa100),
.driver_info = (unsigned long)&samsung_info,
+}, {
+ /* Lenovo OneLinkDock Gigabit LAN */
+ USB_DEVICE(0x17ef, 0x304b),
+ .driver_info = (unsigned long)&lenovo_info,
},
{ },
};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 42e1769..bd363b2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -652,6 +652,13 @@
.driver_info = 0,
},
+/* Samsung USB Ethernet Adapters */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dbff290..d350d27 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -68,7 +68,6 @@
static int cdc_ncm_setup(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
@@ -82,22 +81,22 @@
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ncm_parm,
- sizeof(ncm_parm));
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm));
if (err < 0) {
dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
/* there are some minor differences in NCM and MBIM defaults */
if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -146,7 +145,7 @@
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -162,14 +161,6 @@
dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
-
- /* Adding a pad byte here simplifies the handling in
- * cdc_ncm_fill_tx_frame, by making tx_max always
- * represent the real skb max size.
- */
- if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
- ctx->tx_max++;
-
}
/*
@@ -439,6 +430,10 @@
goto error2;
}
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev))
+ goto error2;
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
@@ -453,12 +448,6 @@
goto error2;
}
- /* initialize data interface */
- if (cdc_ncm_setup(dev)) {
- dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
- goto error2;
- }
-
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
@@ -475,6 +464,15 @@
dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
+ /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
+ * outside the sane range. Adding a pad byte here if necessary
+ * simplifies the handling in cdc_ncm_fill_tx_frame, making
+ * tx_max always represent the real skb max size.
+ */
+ if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+ ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
return 0;
error2:
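
The tail of the cdc_ncm hunk adds the pad byte only when cdc_ncm_setup() has overridden the device's dwNtbOutMaxSize and the clamped tx_max is an exact multiple of the bulk-out max packet size; otherwise a full NTB would end on a USB packet boundary and require a zero-length packet. A small sketch of just that check (the helper name is made up):

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

/* Keep the skb length off a wMaxPacketSize boundary to avoid a ZLP. */
static u32 example_pad_tx_max(struct usbnet *dev, u32 tx_max,
			      u32 device_ntb_out_max)
{
	if (tx_max != device_ntb_out_max &&
	    tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
		tx_max++;
	return tx_max;
}
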
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d89dbe3..adb12f3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -449,9 +449,6 @@
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
-#define REALTEK_USB_DEVICE(vend, prod) \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
-
struct rx_desc {
__le32 opts1;
#define RX_LEN_MASK 0x7fff
@@ -2739,6 +2736,12 @@
struct net_device *netdev;
int ret;
+ if (udev->actconfig->desc.bConfigurationValue != 1) {
+ usb_driver_set_configuration(udev, 1);
+ return -ENODEV;
+ }
+
+ usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
dev_err(&intf->dev, "Out of memory\n");
@@ -2819,9 +2822,9 @@
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
- {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+ {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
{}
};
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
deleted file mode 100644
index f0a8791..0000000
--- a/drivers/net/usb/r815x.c
+++ /dev/null
@@ -1,248 +0,0 @@
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/usb/cdc.h>
-#include <linux/usb/usbnet.h>
-
-#define RTL815x_REQT_READ 0xc0
-#define RTL815x_REQT_WRITE 0x40
-#define RTL815x_REQ_GET_REGS 0x05
-#define RTL815x_REQ_SET_REGS 0x05
-
-#define MCU_TYPE_PLA 0x0100
-#define OCP_BASE 0xe86c
-#define BASE_MII 0xa400
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-
-#define R815x_PHY_ID 32
-#define REALTEK_VENDOR_ID 0x0bda
-
-
-static int pla_read_word(struct usb_device *udev, u16 index)
-{
- int ret;
- u8 shift = index & 2;
- __le32 *tmp;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- index &= ~3;
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out2;
-
- ret = __le32_to_cpu(*tmp);
- ret >>= (shift * 8);
- ret &= 0xffff;
-
-out2:
- kfree(tmp);
- return ret;
-}
-
-static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
-{
- __le32 *tmp;
- u32 mask = 0xffff;
- u16 byen = BYTE_EN_WORD;
- u8 shift = index & 2;
- int ret;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- data &= mask;
-
- if (shift) {
- byen <<= shift;
- mask <<= (shift * 8);
- data <<= (shift * 8);
- index &= ~3;
- }
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out3;
-
- data |= __le32_to_cpu(*tmp) & ~mask;
- *tmp = __cpu_to_le32(data);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
- index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
- 500);
-
-out3:
- kfree(tmp);
- return ret;
-}
-
-static int ocp_reg_read(struct usbnet *dev, u16 addr)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_read_word(dev->udev, ocp_index);
-
-out:
- return ret;
-}
-
-static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out1;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_write_word(dev->udev, ocp_index, data);
-
-out1:
- return ret;
-}
-
-static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
-{
- struct usbnet *dev = netdev_priv(netdev);
- int ret;
-
- if (phy_id != R815x_PHY_ID)
- return -EINVAL;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return -ENODEV;
-
- ret = ocp_reg_read(dev, BASE_MII + reg * 2);
-
- usb_autopm_put_interface(dev->intf);
- return ret;
-}
-
-static
-void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
-
- if (phy_id != R815x_PHY_ID)
- return;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return;
-
- ocp_reg_write(dev, BASE_MII + reg * 2, val);
-
- usb_autopm_put_interface(dev->intf);
-}
-
-static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 1;
-
- return status;
-}
-
-static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 0;
-
- return status;
-}
-
-static const struct driver_info r8152_info = {
- .description = "RTL8152 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8152_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct driver_info r8153_info = {
- .description = "RTL8153 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8153_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct usb_device_id products[] = {
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &r8152_info,
-},
-
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &r8153_info,
-},
-
- { }, /* END */
-};
-MODULE_DEVICE_TABLE(usb, products);
-
-static struct usb_driver r815x_driver = {
- .name = "r815x",
- .id_table = products,
- .probe = usbnet_probe,
- .disconnect = usbnet_disconnect,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
- .reset_resume = usbnet_resume,
- .supports_autosuspend = 1,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(r815x_driver);
-
-MODULE_AUTHOR("Hayes Wang");
-MODULE_DESCRIPTION("Realtek USB ECM device");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dd10d58..f9e96c4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -752,14 +752,12 @@
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
- add_wait_queue(&unlink_wakeup, &wait);
+ add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- dev->wait = &unlink_wakeup;
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
@@ -773,15 +771,14 @@
"waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
- dev->wait = NULL;
- remove_wait_queue(&unlink_wakeup, &wait);
+ remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval;
+ int retval, pm;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -791,6 +788,8 @@
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
+ /* avoid racing against resume */
+ pm = usb_autopm_get_interface(dev->intf);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
@@ -817,6 +816,9 @@
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
+ if (!pm)
+ usb_autopm_put_interface(dev->intf);
+
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
- // waiting for all pending urbs to complete?
- if (dev->wait) {
- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
- wake_up (dev->wait);
- }
+ /* waiting for all pending urbs to complete?
+ * only then can we forgo submitting anew
+ */
+ if (waitqueue_active(&dev->wait)) {
+ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+ wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@
dev->driver_name = name;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ init_waitqueue_head(&dev->wait);
skb_queue_head_init (&dev->rxq);
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
- /* handle remote wakeup ASAP */
- if (!dev->wait &&
- netif_device_present(dev->net) &&
+ /* handle remote wakeup ASAP
+ * we cannot race against stop
+ */
+ if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
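
The usbnet rework above replaces the on-stack wait queue (published through dev->wait and racy against resume) with a permanent waitqueue head in struct usbnet, initialised once at probe; the bh handler additionally tests waitqueue_active() as a cheap shortcut before waking. A minimal sketch of the resulting drain pattern, with generic names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct example_dev {
	wait_queue_head_t wait;		/* init_waitqueue_head() at probe */
	atomic_t pending;		/* URBs (or work items) in flight */
};

/* Completion path: wake any waiter once nothing is outstanding. */
static void example_complete_one(struct example_dev *ed)
{
	if (atomic_dec_and_test(&ed->pending))
		wake_up_all(&ed->wait);
}

/* Teardown path: sleep until everything in flight has drained. */
static void example_wait_for_drain(struct example_dev *ed)
{
	wait_event(ed->wait, atomic_read(&ed->pending) == 0);
}
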
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2ec2041..c0e7c64 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -285,7 +285,11 @@
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d75f8ed..841b608 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -671,8 +671,7 @@
if (err)
break;
} while (rq->vq->num_free);
- if (unlikely(!virtqueue_kick(rq->vq)))
- return false;
+ virtqueue_kick(rq->vq);
return !oom;
}
@@ -877,7 +876,7 @@
err = xmit_skb(sq, skb);
/* This should not happen! */
- if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+ if (unlikely(err)) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@@ -886,6 +885,7 @@
kfree_skb(skb);
return NETDEV_TX_OK;
}
+ virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -1711,7 +1711,8 @@
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3be786f..0fa3b44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1762,11 +1762,20 @@
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_all_intrs(adapter);
-
- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
- vmxnet3_enable_all_intrs(adapter);
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ default:
+ vmxnet3_intr(0, adapter->netdev);
+ break;
+ }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b0f705c..1236812 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1318,6 +1318,9 @@
neigh_release(n);
+ if (reply == NULL)
+ goto out;
+
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1339,15 +1342,103 @@
}
#if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ struct neighbour *n, bool isrouter)
+{
+ struct net_device *dev = request->dev;
+ struct sk_buff *reply;
+ struct nd_msg *ns, *na;
+ struct ipv6hdr *pip6;
+ u8 *daddr;
+ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+ int ns_olen;
+ int i, len;
+
+ if (dev == NULL)
+ return NULL;
+
+ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+ sizeof(*na) + na_olen + dev->needed_tailroom;
+ reply = alloc_skb(len, GFP_ATOMIC);
+ if (reply == NULL)
+ return NULL;
+
+ reply->protocol = htons(ETH_P_IPV6);
+ reply->dev = dev;
+ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+ skb_push(reply, sizeof(struct ethhdr));
+ skb_set_mac_header(reply, 0);
+
+ ns = (struct nd_msg *)skb_transport_header(request);
+
+ daddr = eth_hdr(request)->h_source;
+ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+ }
+ }
+
+ /* Ethernet header */
+ ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
+ ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
+ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+ reply->protocol = htons(ETH_P_IPV6);
+
+ skb_pull(reply, sizeof(struct ethhdr));
+ skb_set_network_header(reply, 0);
+ skb_put(reply, sizeof(struct ipv6hdr));
+
+ /* IPv6 header */
+
+ pip6 = ipv6_hdr(reply);
+ memset(pip6, 0, sizeof(struct ipv6hdr));
+ pip6->version = 6;
+ pip6->priority = ipv6_hdr(request)->priority;
+ pip6->nexthdr = IPPROTO_ICMPV6;
+ pip6->hop_limit = 255;
+ pip6->daddr = ipv6_hdr(request)->saddr;
+ pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+ skb_pull(reply, sizeof(struct ipv6hdr));
+ skb_set_transport_header(reply, 0);
+
+ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+ /* Neighbor Advertisement */
+ memset(na, 0, sizeof(*na)+na_olen);
+ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ na->icmph.icmp6_router = isrouter;
+ na->icmph.icmp6_override = 1;
+ na->icmph.icmp6_solicited = 1;
+ na->target = ns->target;
+ ether_addr_copy(&na->opt[2], n->ha);
+ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+ na->opt[1] = na_olen >> 3;
+
+ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+ csum_partial(na, sizeof(*na)+na_olen, 0));
+
+ pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+ skb_push(reply, sizeof(struct ipv6hdr));
+
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return reply;
+}
+
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct neighbour *n;
- union vxlan_addr ipa;
+ struct nd_msg *msg;
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
- struct nd_msg *msg;
- struct inet6_dev *in6_dev = NULL;
+ struct neighbour *n;
+ struct inet6_dev *in6_dev;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
@@ -1360,19 +1451,20 @@
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
- if (ipv6_addr_loopback(daddr) ||
- ipv6_addr_is_multicast(daddr))
- goto out;
-
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(&msg->target))
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
+ struct sk_buff *reply;
if (!(n->nud_state & NUD_CONNECTED)) {
neigh_release(n);
@@ -1386,13 +1478,23 @@
goto out;
}
- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
- !!in6_dev->cnf.forwarding,
- true, false, false);
+ reply = vxlan_na_create(skb, n,
+ !!(f ? f->flags & NTF_ROUTER : 0));
+
neigh_release(n);
+
+ if (reply == NULL)
+ goto out;
+
+ if (netif_rx_ni(reply) == NET_RX_DROP)
+ dev->stats.rx_dropped++;
+
} else if (vxlan->flags & VXLAN_F_L3MISS) {
- ipa.sin6.sin6_addr = *daddr;
- ipa.sa.sa_family = AF_INET6;
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = msg->target,
+ .sa.sa_family = AF_INET6,
+ };
+
vxlan_ip_miss(dev, &ipa);
}
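
Because vxlan_na_create() above assembles the neighbour advertisement by hand, it must also compute the ICMPv6 checksum itself, over the IPv6 pseudo-header plus the NA payload. That one step in isolation (helper name illustrative):

#include <linux/icmpv6.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

static void example_icmp6_checksum(struct ipv6hdr *pip6,
				   struct icmp6hdr *icmp6, int len)
{
	icmp6->icmp6_cksum = 0;		/* field must be zero while summing */
	icmp6->icmp6_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr,
					     len, IPPROTO_ICMPV6,
					     csum_partial(icmp6, len, 0));
}
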
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1cc1356..1b6b4d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -57,7 +57,7 @@
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -96,7 +96,7 @@
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 11eab9f..9078a6c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1534,7 +1534,7 @@
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
- u32 reg;
+ u32 reg, last_val;
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
@@ -1542,9 +1542,14 @@
if (AR_SREV_9285_12_OR_LATER(ah))
return true;
+ last_val = REG_READ(ah, AR_OBS_BUS_1);
do {
reg = REG_READ(ah, AR_OBS_BUS_1);
+ if (reg != last_val)
+ return true;
+ udelay(1);
+ last_val = reg;
if ((reg & 0x7E7FFFEF) == 0x00702400)
continue;
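
The ath9k_hw_check_alive() change above declares the MAC alive as soon as AR_OBS_BUS_1 changes between two reads, instead of relying only on the known hang signature. The same "did the register move?" idiom in generic form (names hypothetical):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll a status register; any change between reads counts as progress. */
static bool example_hw_is_alive(void __iomem *status_reg, int tries)
{
	u32 last = readl(status_reg);

	while (tries--) {
		u32 val = readl(status_reg);

		if (val != last)
			return true;	/* state is advancing: not hung */
		udelay(1);
	}
	return false;			/* the value never moved */
}
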
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a0ebdd0..82e340d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -732,11 +732,18 @@
return NULL;
/*
- * mark descriptor as zero-length and set the 'more'
- * flag to ensure that both buffers get discarded
+ * Re-check previous descriptor, in case it has been filled
+ * in the mean time.
*/
- rs->rs_datalen = 0;
- rs->rs_more = true;
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+ if (ret == -EINPROGRESS) {
+ /*
+ * mark descriptor as zero-length and set the 'more'
+ * flag to ensure that both buffers get discarded
+ */
+ rs->rs_datalen = 0;
+ rs->rs_more = true;
+ }
}
list_del(&bf->list);
@@ -985,32 +992,32 @@
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
- int ret = 0;
/*
* Discard corrupt descriptors which are marked in
* ath_get_next_rx_buf().
*/
- sc->rx.discard_next = rx_stats->rs_more;
if (discard_current)
- return -EINVAL;
+ goto corrupt;
+
+ sc->rx.discard_next = false;
/*
* Discard zero-length packets.
*/
if (!rx_stats->rs_datalen) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
+ /*
+ * rs_status follows rs_datalen so if rs_datalen is too large
+ * we can take a hint that hardware corrupted it, so ignore
+ * those frames.
+ */
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
/* Only use status info from the last fragment */
@@ -1024,10 +1031,8 @@
* This is different from the other corrupt descriptor
* condition handled above.
*/
- if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
- ret = -EINVAL;
- goto exit;
- }
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
+ goto corrupt;
hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
@@ -1043,18 +1048,15 @@
if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ return -EINVAL;
if (ath_is_mybeacon(common, hdr)) {
RX_STAT_INC(rx_beacons);
@@ -1064,15 +1066,11 @@
/*
* This shouldn't happen, but have a safety check anyway.
*/
- if (WARN_ON(!ah->curchan)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (WARN_ON(!ah->curchan))
+ return -EINVAL;
- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
- ret =-EINVAL;
- goto exit;
- }
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ return -EINVAL;
ath9k_process_rssi(common, hw, rx_stats, rx_status);
@@ -1087,9 +1085,11 @@
sc->rx.num_pkts++;
#endif
-exit:
- sc->rx.discard_next = false;
- return ret;
+ return 0;
+
+corrupt:
+ sc->rx.discard_next = rx_stats->rs_more;
+ return -EINVAL;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0a75e2f..55897d5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1444,14 +1444,16 @@
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- if (!tid->sched)
- continue;
-
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
+ if (!tid->sched) {
+ ath_txq_unlock(sc, txq);
+ continue;
+ }
+
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
@@ -2061,7 +2063,7 @@
ATH_TXBUF_RESET(bf);
- if (tid) {
+ if (tid && ieee80211_is_data_present(hdr->frame_control)) {
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2184,14 +2186,15 @@
txq->stopped = true;
}
+ if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
+
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an &&
ieee80211_is_data_present(hdr->frame_control)) {
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
WARN_ON(tid->ac->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3e99189..ddaa9ef 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -457,7 +457,6 @@
u8 tx_hdrlen; /* sdio bus header length for tx packet */
bool txglom; /* host tx glomming enable flag */
- struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */
u16 head_align; /* buffer pointer alignment */
u16 sgentry_align; /* scatter-gather buffer alignment */
};
@@ -1944,19 +1943,21 @@
if (lastfrm && chain_pad)
tail_pad += blksize - chain_pad;
if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
- pkt_pad = bus->txglom_sgpad;
- if (pkt_pad == NULL)
- brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
+ bus->head_align);
if (pkt_pad == NULL)
return -ENOMEM;
ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ kfree_skb(pkt_pad);
return ret;
+ }
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt, pkt->len - tail_chop);
+ skb_trim(pkt_pad, tail_pad + tail_chop);
__skb_queue_after(pktq, pkt, pkt_pad);
} else {
ntail = pkt->data_len + tail_pad -
@@ -2011,7 +2012,7 @@
return ret;
head_pad = (u16)ret;
if (head_pad)
- memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
+ memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
total_len += pkt_next->len;
@@ -3486,10 +3487,6 @@
bus->txglom = false;
value = 1;
pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
- bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
- if (!bus->txglom_sgpad)
- brcmf_err("allocating txglom padding skb failed, reduced performance\n");
-
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
@@ -4053,7 +4050,6 @@
brcmf_sdio_chip_detach(&bus->ci);
}
- brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
kfree(bus);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d36e252..5965255 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -147,7 +147,7 @@
if (!sta->ap && sta->u.sta.challenge)
kfree(sta->u.sta.challenge);
- del_timer(&sta->timer);
+ del_timer_sync(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
kfree(sta);
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index c0d070c..9cdd91c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -590,6 +590,7 @@
sizeof(priv->tid_data[sta_id][tid]));
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
priv->num_stations--;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index a6839df..398dd09 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1291,8 +1291,6 @@
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
struct iwl_ht_agg *agg;
struct sk_buff_head reclaimed_skbs;
- struct ieee80211_tx_info *info;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id;
int tid;
@@ -1379,22 +1377,28 @@
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully; failures come as single
+ * frames because, before failing a frame, the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
info = IEEE80211_SKB_CB(skb);
memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_resp->txed_2_done;
info->status.ampdu_len = ba_resp->txed;
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 76cde6c..18a895a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -872,8 +872,11 @@
lockdep_assert_held(&mvm->mutex);
- /* Rssi update while not associated ?! */
- if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ /*
+ * Rssi update while not associated - can happen since the statistics
+ * are handled asynchronously
+ */
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
/* No BT - reports should be disabled */
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index e4ead86..2b0ba1f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -152,7 +152,7 @@
IWL_POWER_SCHEME_LP
};
-#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4df12fa..76ee486 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -822,16 +822,12 @@
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
- struct ieee80211_tx_info *info;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id, tid, freed;
-
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
@@ -888,22 +884,26 @@
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully; failures come as single
+ * frames because, before failing a frame, the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- info = IEEE80211_SKB_CB(skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f47bcbe..3872ead 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,13 +359,12 @@
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 32f7500..cb6d189 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -621,7 +621,7 @@
id = *pos++;
elen = *pos++;
left -= 2;
- if (elen > left || elen == 0) {
+ if (elen > left) {
lbs_deb_scan("scan response: invalid IE fmt\n");
goto done;
}
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4..5d9a808 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -189,8 +189,7 @@
vht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_vht_cap));
memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_cap,
le16_to_cpu(vht_cap->header.len));
mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 6261f8c..7db1a89 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -308,8 +308,7 @@
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 03688aa..7fe7b53 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -1211,6 +1211,12 @@
rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
+ /* If skb allocation failed earlier for this Rx packet,
+ * rx_buf_list[rd_index] would have been left NULL.
+ */
+ if (!skb_data)
+ return -ENOMEM;
+
MWIFIEX_SKB_PACB(skb_data, &buf_pa);
pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
PCI_DMA_FROMDEVICE);
@@ -1525,6 +1531,14 @@
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb->len);
+ mwifiex_pcie_enable_host_int(adapter);
+ if (mwifiex_write_reg(adapter,
+ PCIE_CPU_INT_EVENT,
+ CPU_INTR_SLEEP_CFM_DONE)) {
+ dev_warn(adapter->dev,
+ "Write register failed\n");
+ return -1;
+ }
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1993,23 +2007,9 @@
adapter->int_status |= pcie_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
- if (pcie_ireg & HOST_INTR_CMD_DONE) {
- if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
- (adapter->ps_state == PS_STATE_SLEEP)) {
- mwifiex_pcie_enable_host_int(adapter);
- if (mwifiex_write_reg(adapter,
- PCIE_CPU_INT_EVENT,
- CPU_INTR_SLEEP_CFM_DONE)
- ) {
- dev_warn(adapter->dev,
- "Write register failed\n");
- return;
-
- }
- }
- } else if (!adapter->pps_uapsd_mode &&
- adapter->ps_state == PS_STATE_SLEEP &&
- mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP &&
+ mwifiex_pcie_ok_to_access_hw(adapter)) {
/* Potentially for PCIe we could get other
* interrupts like shared. Don't change power
* state until cookie is set */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0a8a26e..668547c 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2101,12 +2101,12 @@
curr_bss->ht_info_offset);
if (curr_bss->bcn_vht_cap)
- curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_cap_offset);
+ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_cap_offset);
if (curr_bss->bcn_vht_oper)
- curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_info_offset);
+ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_info_offset);
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index e8ebbd4..2087488 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,8 +22,6 @@
#define USB_VERSION "1.0"
-static const char usbdriver_name[] = "usb8xxx";
-
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_card_rec *usb_card;
@@ -527,13 +525,6 @@
MWIFIEX_BSS_ROLE_ANY),
MWIFIEX_ASYNC_CMD);
-#ifdef CONFIG_PM
- /* Resume handler may be called due to remote wakeup,
- * force to exit suspend anyway
- */
- usb_disable_autosuspend(card->udev);
-#endif /* CONFIG_PM */
-
return 0;
}
@@ -567,13 +558,12 @@
}
static struct usb_driver mwifiex_usb_driver = {
- .name = usbdriver_name,
+ .name = "mwifiex_usb",
.probe = mwifiex_usb_probe,
.disconnect = mwifiex_usb_disconnect,
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
- .supports_autosuspend = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 13eaeed..981cf6e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -559,7 +559,8 @@
mwifiex_wmm_delete_all_ralist(priv);
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
- if (priv->adapter->if_ops.clean_pcie_ring)
+ if (priv->adapter->if_ops.clean_pcie_ring &&
+ !priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 7f8b5d1..41d4a81 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -5460,14 +5460,15 @@
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- rt2800_bbp_write(rt2x00dev, 69, 0x0d);
- rt2800_bbp_write(rt2x00dev, 70, 0x06);
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5510,7 +5511,6 @@
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 134, 0xd0);
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
- rt2800_bbp_write(rt2x00dev, 148, 0x84);
}
rt2800_disable_unused_dac_adc(rt2x00dev);
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 123c4bb..cde0eaf 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -180,7 +180,7 @@
wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
/* The actual length doesn't include the target's alignment */
- skb->len = desc->length - PLCP_HEADER_LENGTH;
+ skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
fc = (u16 *)skb->data;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7669d49..301cc03 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -132,8 +132,7 @@
/* If the skb is GSO then we'll also need an extra slot for the
* metadata.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso(skb))
min_slots_needed++;
/* If the skb can't possibly fit in the remaining slots
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e5284bc..438d0c0 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -240,7 +240,7 @@
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
- int gso_type;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -299,12 +299,12 @@
}
/* Leave a gap for the GSO descriptor. */
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- else
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
@@ -338,19 +338,15 @@
int head = 1;
int old_meta_prod;
int gso_type;
- int gso_size;
old_meta_prod = npo->meta_prod;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- gso_size = skb_shinfo(skb)->gso_size;
- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- gso_size = skb_shinfo(skb)->gso_size;
- } else {
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
- gso_size = 0;
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
/* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +354,7 @@
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -368,7 +364,7 @@
if ((1 << gso_type) & vif->gso_mask) {
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
} else {
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
@@ -500,8 +496,9 @@
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
max_slots_needed++;
/* If the skb may not fit then bail out now */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f9daa9e..e30d800 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -907,6 +907,7 @@
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
+ skb_reset_network_header(skb);
if (checksum_setup(dev, skb)) {
kfree_skb(skb);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 00660cc..3890166 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -162,8 +162,6 @@
avail = *r;
pci_clip_resource_to_region(bus, &avail, region);
- if (!resource_size(&avail))
- continue;
/*
* "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6b05f61..fdbc294 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1192,6 +1192,9 @@
return err;
pci_fixup_device(pci_fixup_enable, dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ return 0;
+
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index be361b7..1e4e693 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -217,7 +217,7 @@
select PINCTRL_MXS
config PINCTRL_MSM
- tristate
+ bool
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
index 4669c53..eb25002 100644
--- a/drivers/pinctrl/pinctrl-capri.c
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -1435,7 +1435,7 @@
}
static struct of_device_id capri_pinctrl_of_match[] = {
- { .compatible = "brcm,capri-pinctrl", },
+ { .compatible = "brcm,bcm11351-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index 9ccf681..f9fabe9 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -584,7 +585,7 @@
spin_lock_irqsave(&pctl->lock, flags);
regval = readl(pctl->membase + reg);
- regval &= ~IRQ_CFG_IRQ_MASK;
+ regval &= ~(IRQ_CFG_IRQ_MASK << index);
writel(regval | (mode << index), pctl->membase + reg);
spin_unlock_irqrestore(&pctl->lock, flags);
@@ -665,6 +666,7 @@
static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_get_chip(irq);
struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
@@ -674,10 +676,12 @@
if (reg) {
int irqoffset;
+ chained_irq_enter(chip, desc);
for_each_set_bit(irqoffset, ®, SUNXI_IRQ_NUMBER) {
int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
generic_handle_irq(pin_irq);
}
+ chained_irq_exit(chip, desc);
}
}
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index 01c494f..552b0e9 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -511,7 +511,7 @@
static inline u32 sunxi_irq_cfg_reg(u16 irq)
{
- u8 reg = irq / IRQ_CFG_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
return reg + IRQ_CFG_REG;
}
@@ -523,7 +523,7 @@
static inline u32 sunxi_irq_ctrl_reg(u16 irq)
{
- u8 reg = irq / IRQ_CTRL_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
return reg + IRQ_CTRL_REG;
}
@@ -535,7 +535,7 @@
static inline u32 sunxi_irq_status_reg(u16 irq)
{
- u8 reg = irq / IRQ_STATUS_IRQ_PER_REG;
+ u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
return reg + IRQ_STATUS_REG;
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 77d103f..567d691 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -89,7 +89,8 @@
/* GPSR6 */
FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
- FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23,
+ FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19,
+ FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK,
FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
@@ -788,6 +789,7 @@
PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
+ PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK),
/* IPSR0 */
PINMUX_IPSR_DATA(IP0_0, D0),
@@ -3825,7 +3827,7 @@
GP_6_11_FN, FN_IP13_25,
GP_6_10_FN, FN_IP13_24_23,
GP_6_9_FN, FN_IP13_22,
- 0, 0,
+ GP_6_8_FN, FN_SD1_CLK,
GP_6_7_FN, FN_IP13_21_19,
GP_6_6_FN, FN_IP13_18_16,
GP_6_5_FN, FN_IP13_15,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index a0d6152..617a491 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -598,7 +598,7 @@
{
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq))
+ if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
dev_err(bank->chip.gc.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
@@ -611,7 +611,7 @@
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
sirfsoc_gpio_irq_mask(d);
- gpio_unlock_as_irq(&bank->chip.gc, d->hwirq);
+ gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
}
static struct irq_chip sirfsoc_irq_chip = {
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 167f3d0..66977eb 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -183,9 +183,7 @@
struct resource r = {0};
int i, flags;
- if (acpi_dev_resource_memory(res, &r)
- || acpi_dev_resource_io(res, &r)
- || acpi_dev_resource_address_space(res, &r)
+ if (acpi_dev_resource_address_space(res, &r)
|| acpi_dev_resource_ext_address_space(res, &r)) {
pnp_add_resource(dev, &r);
return AE_OK;
@@ -217,6 +215,17 @@
}
switch (res->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (acpi_dev_resource_memory(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
+ case ACPI_RESOURCE_TYPE_IO:
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ if (acpi_dev_resource_io(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index b4b0d83..7061ac0 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -678,6 +678,7 @@
struct list_head free_list;
dma_cookie_t completed_cookie;
struct tasklet_struct tasklet;
+ bool active;
};
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 502663f..91245f5d 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -206,8 +206,8 @@
{
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-
- tasklet_schedule(&bdma_chan->tasklet);
+ if (bdma_chan->active)
+ tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
@@ -562,7 +562,7 @@
}
#endif /* CONFIG_PCI_MSI */
- tasklet_enable(&bdma_chan->tasklet);
+ bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
LIST_HEAD(list);
dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@
BUG_ON(!list_empty(&bdma_chan->active_list));
BUG_ON(!list_empty(&bdma_chan->queue));
- tasklet_disable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+ bdma_chan->active = false;
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector);
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector);
+ } else
+#endif
+ synchronize_irq(priv->pdev->irq);
+
+ tasklet_kill(&bdma_chan->tasklet);
spin_lock_bh(&bdma_chan->lock);
list_splice_init(&bdma_chan->free_list, &list);
spin_unlock_bh(&bdma_chan->lock);
- tsi721_bdma_interrupt_enable(bdma_chan, 0);
-
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@
bdma_chan->dchan.cookie = 1;
bdma_chan->dchan.chan_id = i;
bdma_chan->id = i;
+ bdma_chan->active = false;
spin_lock_init(&bdma_chan->lock);
@@ -799,7 +809,6 @@
tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
(unsigned long)bdma_chan);
- tasklet_disable(&bdma_chan->tasklet);
list_add_tail(&bdma_chan->dchan.device_node,
&mport->dma.channels);
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d1ac4ca..afca1bc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -953,6 +953,8 @@
return 0;
}
+static int _regulator_do_enable(struct regulator_dev *rdev);
+
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -1013,10 +1015,9 @@
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
- if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
- ops->enable) {
- ret = ops->enable(rdev);
- if (ret < 0) {
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ ret = _regulator_do_enable(rdev);
+ if (ret < 0 && ret != -EINVAL) {
rdev_err(rdev, "failed to enable\n");
goto out;
}
@@ -1907,8 +1908,6 @@
trace_regulator_disable_complete(rdev_get_name(rdev));
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
return 0;
}
@@ -1932,6 +1931,8 @@
rdev_err(rdev, "failed to disable\n");
return ret;
}
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
rdev->use_count = 0;
@@ -1984,20 +1985,16 @@
{
int ret = 0;
- /* force disable */
- if (rdev->desc->ops->disable) {
- /* ah well, who wants to live forever... */
- ret = rdev->desc->ops->disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to force disable\n");
- return ret;
- }
- /* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
- REGULATOR_EVENT_DISABLE, NULL);
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to force disable\n");
+ return ret;
}
- return ret;
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
+
+ return 0;
}
/**
@@ -3630,23 +3627,18 @@
mutex_lock(®ulator_list_mutex);
list_for_each_entry(rdev, ®ulator_list, list) {
- struct regulator_ops *ops = rdev->desc->ops;
-
mutex_lock(&rdev->mutex);
- if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
- ops->enable) {
- error = ops->enable(rdev);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+ error = _regulator_do_enable(rdev);
if (error)
ret = error;
} else {
if (!have_full_constraints())
goto unlock;
- if (!ops->disable)
- goto unlock;
if (!_regulator_is_enabled(rdev))
goto unlock;
- error = ops->disable(rdev);
+ error = _regulator_do_disable(rdev);
if (error)
ret = error;
}
@@ -3820,7 +3812,7 @@
ops = rdev->desc->ops;
c = rdev->constraints;
- if (!ops->disable || (c && c->always_on))
+ if (c && c->always_on)
continue;
mutex_lock(&rdev->mutex);
@@ -3841,7 +3833,7 @@
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
- ret = ops->disable(rdev);
+ ret = _regulator_do_disable(rdev);
if (ret != 0)
rdev_err(rdev, "couldn't disable: %d\n", ret);
} else {
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7afd373..c4cde9c 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -580,10 +580,12 @@
clk_enable(rtc_clk);
/* save TICNT for anyone using periodic interrupts */
- ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+ ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+ } else {
+ ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
}
s3c_rtc_enable(pdev, 0);
@@ -605,10 +607,15 @@
clk_enable(rtc_clk);
s3c_rtc_enable(pdev, 1);
- writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
- if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
- tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
- writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+ if (ticnt_en_save) {
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp | ticnt_en_save,
+ s3c_rtc_base + S3C2410_RTCCON);
+ }
+ } else {
+ writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
}
if (device_may_wakeup(dev) && wake_en) {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c3a83df..795ed61 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1660,7 +1660,6 @@
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
@@ -2605,6 +2604,7 @@
return 0;
out_qdio:
qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qdio_free(CARD_DDEV(card));
return rc;
}
@@ -4906,9 +4906,11 @@
if (retries < 3)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
dev_name(&card->gdev->dev));
+ rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
rc = ccw_device_set_online(CARD_RDEV(card));
if (rc)
goto retriable;
@@ -4918,7 +4920,6 @@
rc = ccw_device_set_online(CARD_DDEV(card));
if (rc)
goto retriable;
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
retriable:
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(SETUP, 2, "break1");
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0710550..908d825 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1091,6 +1091,7 @@
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -1132,6 +1133,7 @@
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -1194,6 +1196,7 @@
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0f43042..3524d34 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3447,6 +3447,7 @@
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -3493,6 +3494,7 @@
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -3545,6 +3547,7 @@
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f37505..5642a9b 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,7 +325,7 @@
if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
continue;
- if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
+ if (sc->device->lun != abrt_task->sc->device->lun)
continue;
/* Invalidate WRB Posted for this Task */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed88089..e9279a8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -594,13 +594,13 @@
mp_req->mp_resp_bd = NULL;
}
if (mp_req->req_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->req_buf,
mp_req->req_buf_dma);
mp_req->req_buf = NULL;
}
if (mp_req->resp_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->resp_buf,
mp_req->resp_buf_dma);
mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@
mp_req->req_len = sizeof(struct fcp_cmnd);
io_req->data_xfer_len = mp_req->req_len;
- mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->req_buf_dma,
GFP_ATOMIC);
if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@
return FAILED;
}
- mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->resp_buf_dma,
GFP_ATOMIC);
if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@
bnx2fc_free_mp_resc(io_req);
return FAILED;
}
- memset(mp_req->req_buf, 0, PAGE_SIZE);
- memset(mp_req->resp_buf, 0, PAGE_SIZE);
+ memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->buf_len = CNIC_PAGE_SIZE;
mp_req_bd->flags = 0;
/*
@@ -677,7 +677,7 @@
addr = mp_req->resp_buf_dma;
mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
mp_resp_bd->flags = 0;
return SUCCESS;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 4d93177..d9bae56 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -673,7 +673,8 @@
/* Allocate and map SQ */
tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
- tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
- tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
- tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@
}
memset(tgt->rq, 0, tgt->rq_mem_size);
- tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
- tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@
}
memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
- num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -731,13 +735,13 @@
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map XFERQ */
tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
- tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
- tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@
memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
- (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
- (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
@@ -777,7 +781,7 @@
}
memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
- num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -786,7 +790,7 @@
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map ConnDB */
@@ -805,8 +809,8 @@
/* Allocate and map LCQ */
tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
- tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e4cf23d..b87a193 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -61,7 +61,7 @@
* yield integral num of page buffers
*/
/* adjust SQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
if (hba->max_sqes < num_elements_per_pg)
hba->max_sqes = num_elements_per_pg;
else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@
~(num_elements_per_pg - 1);
/* adjust CQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
if (hba->max_cqes < num_elements_per_pg)
hba->max_cqes = num_elements_per_pg;
else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@
~(num_elements_per_pg - 1);
/* adjust RQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
if (hba->max_rqes < num_elements_per_pg)
hba->max_rqes = num_elements_per_pg;
else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@
/* SQ page table */
memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
- num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.sq_phys;
if (cnic_dev_10g)
@@ -973,7 +973,7 @@
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -981,13 +981,13 @@
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* RQ page table */
memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
- num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.rq_phys;
if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* CQ page table */
memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
- num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.cq_phys;
if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
}
@@ -1064,11 +1064,11 @@
/* Allocate page table memory for SQ which is page aligned */
ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
ep->qp.sq_mem_size =
- (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@
/* Allocate page table memory for CQ which is page aligned */
ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
ep->qp.cq_mem_size =
- (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@
/* Allocate page table memory for RQ which is page aligned */
ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
ep->qp.rq_mem_size =
- (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@
bnx2i_adjust_qp_size(hba);
iscsi_init.flags =
- ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+ (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
if (en_tcp_dack)
iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
iscsi_init.num_tasks_per_conn = hba->max_sqes;
- iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.sq_num_wqes = hba->max_sqes;
iscsi_init.cq_log_wqes_per_page =
- (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+ (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
iscsi_init.cq_num_wqes = hba->max_cqes;
iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
iscsi_init.rq_num_wqes = hba->max_rqes;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 854dad7..c8b0aff 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -525,7 +525,7 @@
struct iscsi_bd *mp_bdt;
u64 addr;
- hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&hba->mp_bd_dma, GFP_KERNEL);
if (!hba->mp_bd_tbl) {
printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@
goto out;
}
- hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ CNIC_PAGE_SIZE,
&hba->dummy_buf_dma, GFP_KERNEL);
if (!hba->dummy_buffer) {
printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
rc = -1;
@@ -548,7 +549,7 @@
addr = (unsigned long) hba->dummy_buf_dma;
mp_bdt->buffer_addr_lo = addr & 0xffffffff;
mp_bdt->buffer_addr_hi = addr >> 32;
- mp_bdt->buffer_length = PAGE_SIZE;
+ mp_bdt->buffer_length = CNIC_PAGE_SIZE;
mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
@@ -565,12 +566,12 @@
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
if (hba->mp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
}
if (hba->dummy_buffer) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->dummy_buffer, hba->dummy_buf_dma);
hba->dummy_buffer = NULL;
}
@@ -934,14 +935,14 @@
struct bnx2i_conn *bnx2i_conn)
{
if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.resp_bd_tbl,
bnx2i_conn->gen_pdu.resp_bd_dma);
bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
}
if (bnx2i_conn->gen_pdu.req_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@
bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
bnx2i_conn->gen_pdu.req_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
goto login_req_bd_tbl_failure;
bnx2i_conn->gen_pdu.resp_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.resp_bd_dma,
GFP_KERNEL);
if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@
return 0;
login_resp_bd_tbl_failure:
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 4911310..22a9bb1 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -311,9 +311,8 @@
}
#define for_each_isci_host(id, ihost, pdev) \
- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
- ihost = to_pci_info(pdev)->hosts[++id])
+ for (id = 0; id < SCI_MAX_CONTROLLERS && \
+ (ihost = to_pci_info(pdev)->hosts[id]); id++)
static inline void wait_for_start(struct isci_host *ihost)
{
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 85c77f6..ac87974 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -615,13 +615,6 @@
SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
- u32 port_state = iport->sm.current_state_id;
-
- /* if the PORT'S state is resetting then the link up is from
- * port hard reset in this case, we need to tell the port
- * that link up is recieved
- */
- BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
sci_port_link_up(iport, iphy);
}
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 0d30ca8..5d6fda7 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -801,7 +801,7 @@
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1fe95e..266724b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2996,8 +2996,7 @@
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9bc86b9..0a1dcb4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2880,6 +2880,7 @@
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
+#define ATIO_VECTOR 2
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
@@ -2936,34 +2937,47 @@
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < ha->msix_count; i++) {
+ for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- ret = request_irq(qentry->vector,
- qla83xx_msix_entries[i].handler,
- 0, qla83xx_msix_entries[i].name, rsp);
- } else if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
- } else {
+ else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
0, msix_entries[i].name, rsp);
- }
- if (ret) {
- ql_log(ql_log_fatal, vha, 0x00cb,
- "MSI-X: unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
- qla24xx_disable_msix(ha);
- ha->mqenable = 0;
- goto msix_out;
- }
+ if (ret)
+ goto msix_register_fail;
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
}
+ /*
+ * If target mode is enabled, also request the vector for the ATIO
+ * queue.
+ */
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ qentry = &ha->msix_entries[ATIO_VECTOR];
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[ATIO_VECTOR].handler,
+ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
+msix_register_fail:
+ if (ret) {
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
+ }
+
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 17d7404..9969fa1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1419,6 +1419,9 @@
{
struct stor_mem_pools *memp = sdevice->hostdata;
+ if (!memp)
+ return;
+
mempool_destroy(memp->request_mempool);
kmem_cache_destroy(memp->request_pool);
kfree(memp);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 31534b5..c3b2fb9 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -132,9 +132,9 @@
flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
- flags |= GPIOF_INIT_HIGH;
- else
flags |= GPIOF_INIT_LOW;
+ else
+ flags |= GPIOF_INIT_HIGH;
status = gpio_request_one(cdata->gpio, flags,
dev_name(&spi->dev));
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index b0842f7..5d7b07f 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1455,6 +1455,14 @@
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
clk_disable_unprepare(as->clk);
return 0;
@@ -1464,9 +1472,16 @@
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
clk_prepare_enable(as->clk);
- return 0;
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+
+ return ret;
}
static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cabed8f..28ae470 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -514,7 +514,8 @@
#ifdef CONFIG_PM_RUNTIME
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_disable(mcfqspi->clk);
@@ -523,7 +524,8 @@
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_enable(mcfqspi->clk);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index ec79f72..a253920 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -420,7 +420,6 @@
static int dspi_resume(struct device *dev)
{
-
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
@@ -504,7 +503,7 @@
clk_prepare_enable(dspi->clk);
init_waitqueue_head(&dspi->waitq);
- platform_set_drvdata(pdev, dspi);
+ platform_set_drvdata(pdev, master);
ret = spi_bitbang_start(&dspi->bitbang);
if (ret != 0) {
@@ -525,7 +524,8 @@
static int dspi_remove(struct platform_device *pdev)
{
- struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
spi_bitbang_stop(&dspi->bitbang);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index a5474ef..47f15d9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -948,8 +948,8 @@
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_ipg);
- clk_disable_unprepare(spi_imx->clk_per);
+ clk_unprepare(spi_imx->clk_ipg);
+ clk_unprepare(spi_imx->clk_per);
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 2e7f38c..88eb57e 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -915,7 +915,7 @@
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -930,7 +930,7 @@
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1452,6 +1452,11 @@
pch_spi_set_master_mode(master);
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
+
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
@@ -1459,14 +1464,10 @@
goto err_spi_register_master;
}
- if (use_dma) {
- dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
- }
-
return 0;
err_spi_register_master:
+ pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 4a08e16..79206cb 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -866,6 +866,8 @@
_IOC_SIZE (iocmd));
#endif
iolen = _IOC_SIZE (iocmd);
+ if (iolen > sizeof(arg))
+ return -EFAULT;
data = ifr->ifr_data + sizeof (iocmd);
if (copy_from_user (&arg, data, iolen))
return -EFAULT;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7f1a7ce..b83ec37 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -785,7 +785,7 @@
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
iscsit_free_cmd(cmd, false);
}
}
@@ -3708,7 +3708,7 @@
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
@@ -4151,7 +4151,7 @@
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
@@ -4196,6 +4196,10 @@
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
iscsit_free_queue_reqs_for_conn(conn);
/*
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb..4ca8fd2 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@
}
cr = cmd->cr;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
@@ -297,7 +297,7 @@
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
- * list_del(&cmd->i_conn_node); to release the command to the
+ * list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@
cmd->sess = conn->sess;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 3976183..44a5471 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -137,7 +137,7 @@
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
- if (tpg->tpg_state == TPG_STATE_FREE) {
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 42f18fc..77e6531 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1079,25 +1079,31 @@
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
-
- len = min(psg->length, left);
- if (offset >= sg->length) {
- sg = sg_next(sg);
- offset = 0;
- }
+ unsigned int psg_len, copied = 0;
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+ psg_len = min(left, psg->length);
+ while (psg_len) {
+ len = min(psg_len, sg->length - offset);
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
- if (read)
- memcpy(paddr, addr, len);
- else
- memcpy(addr, paddr, len);
+ if (read)
+ memcpy(paddr + copied, addr, len);
+ else
+ memcpy(addr, paddr + copied, len);
- left -= len;
- offset += len;
+ left -= len;
+ offset += len;
+ copied += len;
+ psg_len -= len;
+
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ kunmap_atomic(addr);
+ }
kunmap_atomic(paddr);
- kunmap_atomic(addr);
}
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 35c0664..5f88d76 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -136,6 +136,7 @@
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework.
@@ -210,8 +211,16 @@
tristate "ACPI INT3403 thermal driver"
depends on X86 && ACPI
help
- This driver uses ACPI INT3403 device objects. If present, it will
- register each INT3403 thermal sensor as a thermal zone.
+ Newer laptops and tablets that use ACPI may have thermal sensors
+ outside the core CPU/SOC for thermal safety reasons. These
+ temperature sensors are also exposed for the OS to use via the
+ so-called INT3403 ACPI object. This driver will, on devices that have
+ such sensors, expose the temperature information from these sensors
+ to userspace via the normal thermal framework. This means that a wide
+ range of applications and GUI widgets can show this information to
+ the user or use this information for making decisions. For example,
+ the Intel Thermal Daemon can use this information to let the user
+ run the laptop without turning on the fans.
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 338a88b..71b0ec0 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -56,10 +56,15 @@
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
+static struct thermal_governor *def_governor;
+
static struct thermal_governor *__find_governor(const char *name)
{
struct thermal_governor *pos;
+ if (!name || !name[0])
+ return def_governor;
+
list_for_each_entry(pos, &thermal_governor_list, governor_list)
if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
return pos;
@@ -82,17 +87,23 @@
if (__find_governor(governor->name) == NULL) {
err = 0;
list_add(&governor->governor_list, &thermal_governor_list);
+ if (!def_governor && !strncmp(governor->name,
+ DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+ def_governor = governor;
}
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
+ /*
+ * only thermal zones with a specified tz->tzp->governor_name
+ * may run with tz->governor unset
+ */
if (pos->governor)
continue;
- if (pos->tzp)
- name = pos->tzp->governor_name;
- else
- name = DEFAULT_THERMAL_GOVERNOR;
+
+ name = pos->tzp->governor_name;
+
if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
pos->governor = governor;
}
@@ -342,8 +353,8 @@
static void handle_non_critical_trips(struct thermal_zone_device *tz,
int trip, enum thermal_trip_type trip_type)
{
- if (tz->governor)
- tz->governor->throttle(tz, trip);
+ tz->governor ? tz->governor->throttle(tz, trip) :
+ def_governor->throttle(tz, trip);
}
static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -1107,7 +1118,7 @@
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->np = np;
cdev->ops = ops;
- cdev->updated = true;
+ cdev->updated = false;
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1533,7 +1544,7 @@
if (tz->tzp)
tz->governor = __find_governor(tz->tzp->governor_name);
else
- tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR);
+ tz->governor = def_governor;
mutex_unlock(&thermal_governor_lock);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 972e1c7..081fd7e 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,6 +68,10 @@
struct thermal_zone_device *tzone;
};
+static const struct thermal_zone_params pkg_temp_tz_params = {
+ .no_hwmon = true,
+};
+
/* List maintaining number of package instances */
static LIST_HEAD(phy_dev_list);
static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -394,7 +398,6 @@
int err;
u32 tj_max;
struct phy_dev_entry *phy_dev_entry;
- char buffer[30];
int thres_count;
u32 eax, ebx, ecx, edx;
u8 *temp;
@@ -440,13 +443,11 @@
phy_dev_entry->first_cpu = cpu;
phy_dev_entry->tj_max = tj_max;
phy_dev_entry->ref_cnt = 1;
- snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n",
- phy_dev_entry->phys_proc_id);
- phy_dev_entry->tzone = thermal_zone_device_register(buffer,
+ phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ?
0x03 : 0x01,
- phy_dev_entry, &tzone_ops, NULL, 0, 0);
+ phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
if (IS_ERR(phy_dev_entry->tzone)) {
err = PTR_ERR(phy_dev_entry->tzone);
goto err_ret_free;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index cf86e72..dc697ce 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -433,13 +433,10 @@
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@@ -470,8 +467,7 @@
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,7 +488,10 @@
unsigned long flags;
int i, locked = 1;
- local_irq_save(flags);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
@@ -507,8 +506,7 @@
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static struct console sunhv_console = {
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 380fb53..5faa8e9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -844,20 +844,16 @@
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int sunsab_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index db79b76..9a0f24f 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1295,13 +1295,10 @@
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the UER then disable the interrupts
@@ -1319,8 +1316,7 @@
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 45a8c6a..a2c40ed 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1195,20 +1195,16 @@
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
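
The sunhv, sunsab, sunsu and sunzilog changes above all switch their console write paths to the same oops-safe locking idiom: take the port lock together with disabling interrupts, but only trylock when a sysrq or oops is in progress, since the crashing context may already hold the lock. A minimal sketch of that pattern, assuming a hypothetical emit_chars() helper in place of each driver's real output loop:

#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the driver-specific output loop
 * (uart_console_write() plus a per-driver putchar in the real drivers). */
static void emit_chars(struct uart_port *port, const char *s, unsigned int n)
{
}

static void example_console_write(struct uart_port *port,
				  const char *s, unsigned int n)
{
	unsigned long flags;
	int locked = 1;

	/*
	 * During sysrq handling or an oops the lock may already be held
	 * on this CPU, so only try to take it; otherwise lock normally.
	 * The irqsave variants replace the old local_irq_save() +
	 * spin_lock() pairing.
	 */
	if (port->sysrq || oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	emit_chars(port, s, n);

	/* Only unlock if the trylock actually succeeded. */
	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}

The trylock fallback is what lets the console still print during an oops instead of deadlocking on a lock the dying context owns.
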
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 8d72f0c..062967c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -717,6 +717,10 @@
result = -ENOMEM;
goto err;
}
+
+ if (dev->quirks & USB_QUIRK_DELAY_INIT)
+ msleep(100);
+
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
if (result < 0) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8f37063..739ee8e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech HD Pro Webcams C920 and C930e */
+ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6fe577d..924a6cc 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4733,6 +4733,9 @@
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
+ /* support building packets from discontinuous buffers */
+ hcd->self.no_sg_constraint = 1;
+
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
@@ -4757,14 +4760,6 @@
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
- xhci = hcd_to_xhci(hcd);
- /*
- * Support arbitrarily aligned sg-list entries on hosts without
- * TD fragment rules (which are currently unsupported).
- */
- if (xhci->hci_version < 0x100)
- hcd->self.no_sg_constraint = 1;
-
return 0;
}
@@ -4793,9 +4788,6 @@
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
- if (xhci->hci_version < 0x100)
- hcd->self.no_sg_constraint = 1;
-
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fb7a8f..54af4e9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@
if (pfn_valid(pfn)) {
bool reserved;
struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
+ struct page *head = compound_head(tail);
reserved = !!(PageReserved(head));
if (head != tail) {
/*
* "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
+ * (compound_head takes care of that)
* but the hugepage may have been split
* from under us (and we may not hold a
* reference count on the head page so it can
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index a0fa5de..e1e22e0 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -505,9 +505,13 @@
r = -ENOBUFS;
goto err;
}
- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
+ if (unlikely(r < 0))
+ goto err;
+
+ d = r;
if (d == vq->num) {
r = 0;
goto err;
@@ -532,6 +536,12 @@
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
+
+ /* Detect overrun */
+ if (unlikely(datalen > 0)) {
+ r = UIO_MAXIOV + 1;
+ goto err;
+ }
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -587,6 +597,14 @@
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
+ /* On overrun, truncate and discard */
+ if (unlikely(headcount > UIO_MAXIOV)) {
+ msg.msg_iovlen = 1;
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ 1, MSG_DONTWAIT | MSG_TRUNC);
+ pr_debug("Discarded rx packet: len %zd\n", sock_len);
+ continue;
+ }
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37d06ea..61a6ac8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -399,12 +399,26 @@
state = BP_EAGAIN;
break;
}
-
- pfn = page_to_pfn(page);
- frame_list[i] = pfn_to_mfn(pfn);
-
scrub_page(page);
+ frame_list[i] = page_to_pfn(page);
+ }
+
+ /*
+ * Ensure that ballooned highmem pages don't have kmaps.
+ *
+ * Do this before changing the p2m as kmap_flush_unused()
+ * reads PTEs to obtain pages (and hence needs the original
+ * p2m entry).
+ */
+ kmap_flush_unused();
+
+ /* Update direct mapping, invalidate P2M, and add to balloon. */
+ for (i = 0; i < nr_pages; i++) {
+ pfn = frame_list[i];
+ frame_list[i] = pfn_to_mfn(pfn);
+ page = pfn_to_page(pfn);
+
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
@@ -429,11 +443,9 @@
}
#endif
- balloon_append(pfn_to_page(pfn));
+ balloon_append(page);
}
- /* Ensure that ballooned highmem pages don't have kmaps. */
- kmap_flush_unused();
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 2408473..80ef38c 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -41,19 +41,8 @@
static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- struct dentry *root;
- root = mount_pseudo(fs_type, "anon_inode:", NULL,
+ return mount_pseudo(fs_type, "anon_inode:", NULL,
&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
- if (!IS_ERR(root)) {
- struct super_block *s = root->d_sb;
- anon_inode_inode = alloc_anon_inode(s);
- if (IS_ERR(anon_inode_inode)) {
- dput(root);
- deactivate_locked_super(s);
- root = ERR_CAST(anon_inode_inode);
- }
- }
- return root;
}
static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@
static int __init anon_inode_init(void)
{
- int error;
-
- error = register_filesystem(&anon_inode_fs_type);
- if (error)
- goto err_exit;
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
- if (IS_ERR(anon_inode_mnt)) {
- error = PTR_ERR(anon_inode_mnt);
- goto err_unregister_filesystem;
- }
- return 0;
+ if (IS_ERR(anon_inode_mnt))
+ panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
-err_unregister_filesystem:
- unregister_filesystem(&anon_inode_fs_type);
-err_exit:
- panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
+ anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ if (IS_ERR(anon_inode_inode))
+ panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+
+ return 0;
}
fs_initcall(anon_inode_init);
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0129b78..4f70f38 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -458,11 +458,10 @@
struct blk_integrity_exchg bix;
struct bio_vec *bv;
sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
- unsigned int sectors, total, ret;
+ unsigned int sectors, ret = 0;
void *prot_buf = bio->bi_integrity->bip_buf;
int i;
- ret = total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
@@ -484,8 +483,6 @@
sectors = bv->bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
- total += sectors * bi->tuple_size;
- BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cf32f03..c0f3718 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -513,7 +513,7 @@
static inline unsigned int
get_rfc1002_length(void *buf)
{
- return be32_to_cpu(*((__be32 *)buf));
+ return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
}
static inline void
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 53c1507..834fce7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2579,31 +2579,19 @@
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc = -EACCES;
+ loff_t lock_pos = pos;
- BUG_ON(iocb->ki_pos != pos);
-
+ if (file->f_flags & O_APPEND)
+ lock_pos = i_size_read(inode);
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents writing.
*/
down_read(&cinode->lock_sem);
- if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
+ if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
server->vals->exclusive_lock_type, NULL,
- CIFS_WRITE_OP)) {
- mutex_lock(&inode->i_mutex);
- rc = __generic_file_aio_write(iocb, iov, nr_segs,
- &iocb->ki_pos);
- mutex_unlock(&inode->i_mutex);
- }
-
- if (rc > 0) {
- ssize_t err;
-
- err = generic_write_sync(file, iocb->ki_pos - rc, rc);
- if (err < 0)
- rc = err;
- }
-
+ CIFS_WRITE_OP))
+ rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
up_read(&cinode->lock_sem);
return rc;
}
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b375709..18cd565 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -270,6 +270,26 @@
iov->iov_len = rqst->rq_pagesz;
}
+static unsigned long
+rqst_len(struct smb_rqst *rqst)
+{
+ unsigned int i;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned long buflen = 0;
+
+ /* total up iov array first */
+ for (i = 0; i < rqst->rq_nvec; i++)
+ buflen += iov[i].iov_len;
+
+ /* add in the page array if there is one */
+ if (rqst->rq_npages) {
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
+ buflen += rqst->rq_tailsz;
+ }
+
+ return buflen;
+}
+
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
@@ -277,6 +297,7 @@
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+ unsigned long send_length;
unsigned int i;
size_t total_len = 0, sent;
struct socket *ssocket = server->ssocket;
@@ -285,6 +306,14 @@
if (ssocket == NULL)
return -ENOTSOCK;
+ /* sanity check send length */
+ send_length = rqst_len(rqst);
+ if (send_length != smb_buf_length + 4) {
+ WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
+ send_length, smb_buf_length);
+ return -EIO;
+ }
+
cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
dump_smb(iov[0].iov_base, iov[0].iov_len);
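
The 0xffffff mask added to get_rfc1002_length() and the new rqst_len() check in smb_send_rqst() are two halves of the same sanity test: the 4-byte RFC 1002 session header carries a one-byte message type followed by (effectively) a 24-bit big-endian payload length, so the length field must be masked before use, and the bytes actually described by the iovec and page array must equal that length plus the 4 header bytes. A short sketch of the relation being enforced (names here are illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Byte 0 of the session header is the message type; only the low
 * 24 bits carry the SMB payload length. */
static inline unsigned int example_rfc1002_length(const void *buf)
{
	return be32_to_cpu(*(const __be32 *)buf) & 0x00ffffff;
}

/* The check added to smb_send_rqst() reduces to this relation:
 * total bytes in the request == payload length + 4 header bytes. */
static inline bool example_send_length_ok(unsigned long send_length,
					  const void *hdr)
{
	return send_length == example_rfc1002_length(hdr) + 4;
}
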
diff --git a/fs/dcache.c b/fs/dcache.c
index 265e0ce..ca02c13 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2833,9 +2833,9 @@
u32 dlen = ACCESS_ONCE(name->len);
char *p;
- if (*buflen < dlen + 1)
- return -ENAMETOOLONG;
*buflen -= dlen + 1;
+ if (*buflen < 0)
+ return -ENAMETOOLONG;
p = *buffer -= dlen + 1;
*p++ = '/';
while (dlen--) {
diff --git a/fs/file.c b/fs/file.c
index db25c2b..eb56a13 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -683,35 +683,54 @@
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*/
-struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed)
+static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
- *fput_needed = 0;
if (atomic_read(&files->count) == 1) {
file = __fcheck_files(files, fd);
- if (file && (file->f_mode & mask))
- file = NULL;
+ if (!file || unlikely(file->f_mode & mask))
+ return 0;
+ return (unsigned long)file;
} else {
file = __fget(fd, mask);
- if (file)
- *fput_needed = 1;
+ if (!file)
+ return 0;
+ return FDPUT_FPUT | (unsigned long)file;
}
-
- return file;
}
-struct file *fget_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget(unsigned int fd)
{
- return __fget_light(fd, FMODE_PATH, fput_needed);
+ return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(__fdget);
-struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget_raw(unsigned int fd)
{
- return __fget_light(fd, 0, fput_needed);
+ return __fget_light(fd, 0);
}
+unsigned long __fdget_pos(unsigned int fd)
+{
+ unsigned long v = __fdget(fd);
+ struct file *file = (struct file *)(v & ~3);
+
+ if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
+ if (file_count(file) >