Merge tag 'io_uring-5.10-2020-12-11' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
"Two fixes in here, fixing issues introduced in this merge window"
* tag 'io_uring-5.10-2020-12-11' of git://git.kernel.dk/linux-block:
io_uring: fix file leak on error path of io ctx creation
io_uring: fix mis-seting personality's creds
diff --git a/.mailmap b/.mailmap
index 505b3d7..225546c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -290,6 +290,7 @@
Sarangdhar Joshi <spjoshi@codeaurora.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
+Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
@@ -321,6 +322,8 @@
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Uwe Kleine-König <ukleinek@strlen.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
diff --git a/CREDITS b/CREDITS
index 7483019..e88d1a7 100644
--- a/CREDITS
+++ b/CREDITS
@@ -740,6 +740,11 @@
S: Portland, Oregon
S: USA
+N: Jason Cooper
+D: ARM/Marvell SOC co-maintainer
+D: irqchip co-maintainer
+D: MVEBU PCI DRIVER co-maintainer
+
N: Robin Cornelius
E: robincornelius@users.sourceforge.net
D: Ralink rt2x00 WLAN driver
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index a22024f..9b90efc 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -137,15 +137,24 @@
==============================
Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file with size, checksum and
-12-byte magic word as below.
+to the end of the initrd (initramfs) image file with padding, size,
+checksum and 12-byte magic word as below.
-[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
+[initrd][bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]
+
+The size and checksum fields are unsigned 32-bit little-endian values.
+
+When the boot configuration is added to the initrd image, the total
+file size is aligned to 4 bytes. To fill the gap, null characters
+(``\0``) will be added. Thus the ``size`` is the length of the bootconfig
+file + padding bytes.
The Linux kernel decodes the last part of the initrd image in memory to
get the boot configuration data.
Because of this "piggyback" method, there is no need to change or
-update the boot loader and the kernel image itself.
+update the boot loader and the kernel image itself as long as the boot
+loader passes the correct initrd file size. If, by any chance, the boot
+loader passes a longer size, the kernel fails to find the bootconfig data.
To do this operation, Linux kernel provides "bootconfig" command under
tools/bootconfig, which allows admin to apply or delete the config file
@@ -176,7 +185,8 @@
contain 256 key-value pairs. In most cases, the number of config items
will be under 100 entries and smaller than 8KB, so it would be enough.
If the node number exceeds 1024, parser returns an error even if the file
-size is smaller than 32KB.
+size is smaller than 32KB. (Note that this maximum size does not include
+the padding null characters.)
Anyway, since bootconfig command verifies it when appending a boot config
to initrd image, user can notice it before boot.
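As a rough illustration of the footer layout documented above, locating the
bootconfig data from the tail of an initrd image in user space could look like
the sketch below (the names are illustrative assumptions, not the
tools/bootconfig implementation)::

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    #define BOOTCONFIG_MAGIC     "#BOOTCONFIG\n"
    #define BOOTCONFIG_MAGIC_LEN 12

    static uint32_t read_le32(const unsigned char *p)
    {
            return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /*
     * Return a pointer to the start of the (padded) bootconfig data, or
     * NULL if the magic word is missing or the recorded size is bogus,
     * e.g. because the boot loader reported a wrong initrd length.
     */
    static const unsigned char *find_bootconfig(const unsigned char *initrd,
                                                size_t len, uint32_t *size)
    {
            const unsigned char *footer;

            if (len < BOOTCONFIG_MAGIC_LEN + 8)
                    return NULL;

            footer = initrd + len - BOOTCONFIG_MAGIC_LEN;
            if (memcmp(footer, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
                    return NULL;

            *size = read_le32(footer - 8);  /* size includes the padding bytes */
            if (*size > len - BOOTCONFIG_MAGIC_LEN - 8)
                    return NULL;

            return footer - 8 - *size;
    }

The checksum is the le32 word just before the magic (``footer - 4`` in the
sketch) and would be verified against the data in the same fashion.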
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
index 03a7672..7ce06f9 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
@@ -76,6 +76,12 @@
resets:
maxItems: 1
+ wifi-2.4ghz-coexistence:
+ type: boolean
+ description: >
+ Should the pixel frequencies in the WiFi frequencies range be
+ avoided?
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 3613c2c..0968b40 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -33,7 +33,7 @@
spi-max-frequency = <10000000>;
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
interrupt-parent = <&gpio1>;
- interrupts = <14 GPIO_ACTIVE_LOW>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
index cfaf889..9e4dc51 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -25,7 +25,7 @@
clock-frequency = <100000>;
interrupt-parent = <&gpio1>;
- interrupts = <29 GPIO_ACTIVE_HIGH>;
+ interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
index 92f399e..2bd8256 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
@@ -25,7 +25,7 @@
clock-frequency = <400000>;
interrupt-parent = <&gpio1>;
- interrupts = <17 GPIO_ACTIVE_HIGH>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/driver-api/media/drivers/vidtv.rst b/Documentation/driver-api/media/drivers/vidtv.rst
index 6511544..673bdff 100644
--- a/Documentation/driver-api/media/drivers/vidtv.rst
+++ b/Documentation/driver-api/media/drivers/vidtv.rst
@@ -149,11 +149,11 @@
Because the generator is implemented in a separate file, it can be
reused elsewhere in the media subsystem.
- Currently vidtv supports working with 3 PSI tables: PAT, PMT and
- SDT.
+ Currently vidtv supports working with 5 PSI tables: PAT, PMT,
+ SDT, NIT and EIT.
The specification for PAT and PMT can be found in *ISO 13818-1:
- Systems*, while the specification for the SDT can be found in *ETSI
+ Systems*, while the specification for the SDT, NIT, EIT can be found in *ETSI
EN 300 468: Specification for Service Information (SI) in DVB
systems*.
@@ -197,6 +197,8 @@
#. Their programs will be concatenated to populate the PAT
+ #. Their events will be concatenated to populate the EIT
+
#. For each program in the PAT, a PMT section will be created
#. The PMT section for a channel will be assigned its streams.
@@ -256,6 +258,42 @@
The first step to check whether the demod loaded successfully is to run::
$ dvb-fe-tool
+ Device Dummy demod for DVB-T/T2/C/S/S2 (/dev/dvb/adapter0/frontend0) capabilities:
+ CAN_FEC_1_2
+ CAN_FEC_2_3
+ CAN_FEC_3_4
+ CAN_FEC_4_5
+ CAN_FEC_5_6
+ CAN_FEC_6_7
+ CAN_FEC_7_8
+ CAN_FEC_8_9
+ CAN_FEC_AUTO
+ CAN_GUARD_INTERVAL_AUTO
+ CAN_HIERARCHY_AUTO
+ CAN_INVERSION_AUTO
+ CAN_QAM_16
+ CAN_QAM_32
+ CAN_QAM_64
+ CAN_QAM_128
+ CAN_QAM_256
+ CAN_QAM_AUTO
+ CAN_QPSK
+ CAN_TRANSMISSION_MODE_AUTO
+ DVB API Version 5.11, Current v5 delivery system: DVBC/ANNEX_A
+ Supported delivery systems:
+ DVBT
+ DVBT2
+ [DVBC/ANNEX_A]
+ DVBS
+ DVBS2
+ Frequency range for the current standard:
+ From: 51.0 MHz
+ To: 2.15 GHz
+ Step: 62.5 kHz
+ Tolerance: 29.5 MHz
+ Symbol rate ranges for the current standard:
+ From: 1.00 MBauds
+ To: 45.0 MBauds
This should return what is currently set up at the demod struct, i.e.::
@@ -314,7 +352,7 @@
here's an example::
[Channel]
- FREQUENCY = 330000000
+ FREQUENCY = 474000000
MODULATION = QAM/AUTO
SYMBOL_RATE = 6940000
INNER_FEC = AUTO
@@ -335,6 +373,14 @@
Assuming this channel is named 'channel.conf', you can then run::
$ dvbv5-scan channel.conf
+ dvbv5-scan ~/vidtv.conf
+ ERROR command BANDWIDTH_HZ (5) not found during retrieve
+ Cannot calc frequency shift. Either bandwidth/symbol-rate is unavailable (yet).
+ Scanning frequency #1 330000000
+ (0x00) Signal= -68.00dBm
+ Scanning frequency #2 474000000
+ Lock (0x1f) Signal= -34.45dBm C/N= 33.74dB UCB= 0
+ Service Beethoven, provider LinuxTV.org: digital television
For more information on dvb-scan, check its documentation online here:
`dvb-scan Documentation <https://www.linuxtv.org/wiki/index.php/Dvbscan>`_.
@@ -344,23 +390,38 @@
dvbv5-zap is a command line tool that can be used to record MPEG-TS to disk. The
typical use is to tune into a channel and put it into record mode. The example
-below - which is taken from the documentation - illustrates that::
+below - which is taken from the documentation - illustrates that\ [1]_::
- $ dvbv5-zap -c dvb_channel.conf "trilhas sonoras" -r
- using demux '/dev/dvb/adapter0/demux0'
+ $ dvbv5-zap -c dvb_channel.conf "beethoven" -o music.ts -P -t 10
+ using demux 'dvb0.demux0'
reading channels from file 'dvb_channel.conf'
- service has pid type 05: 204
- tuning to 573000000 Hz
- audio pid 104
- dvb_set_pesfilter 104
- Lock (0x1f) Quality= Good Signal= 100.00% C/N= -13.80dB UCB= 70 postBER= 3.14x10^-3 PER= 0
- DVR interface '/dev/dvb/adapter0/dvr0' can now be opened
+ tuning to 474000000 Hz
+ pass all PID's to TS
+ dvb_set_pesfilter 8192
+ dvb_dev_set_bufsize: buffer set to 6160384
+ Lock (0x1f) Quality= Good Signal= -34.66dBm C/N= 33.41dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+ Lock (0x1f) Quality= Good Signal= -34.57dBm C/N= 33.46dB UCB= 0 postBER= 0 preBER= 1.05x10^-3 PER= 0
+ Record to file 'music.ts' started
+ received 24587768 bytes (2401 Kbytes/sec)
+ Lock (0x1f) Quality= Good Signal= -34.42dBm C/N= 33.89dB UCB= 0 postBER= 0 preBER= 2.44x10^-3 PER= 0
-The channel can be watched by playing the contents of the DVR interface, with
-some player that recognizes the MPEG-TS format, such as *mplayer* or *vlc*.
+.. [1] In this example, it records 10 seconds with all program IDs stored
+ at the music.ts file.
+
+
+The channel can be watched by playing the contents of the stream with some
+player that recognizes the MPEG-TS format, such as ``mplayer`` or ``vlc``.
By playing the contents of the stream one can visually inspect the workings of
-vidtv, e.g.::
+vidtv, e.g., to play a recorded TS file with::
+
+ $ mplayer music.ts
+
+or, alternatively, running this command on one terminal::
+
+ $ dvbv5-zap -c dvb_channel.conf "beethoven" -P -r &
+
+And, on a second terminal, playing the contents from DVR interface with::
$ mplayer /dev/dvb/adapter0/dvr0
@@ -423,3 +484,30 @@
- Updating the error statistics accordingly (e.g. BER, etc).
- Simulating some noise in the encoded data.
+
+Functions and structs used within vidtv
+---------------------------------------
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_bridge.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_channel.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_demod.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_encoder.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_mux.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_pes.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_psi.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_s302m.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_ts.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.h
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_common.c
+
+.. kernel-doc:: drivers/media/test-drivers/vidtv/vidtv_tuner.c
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index cf3ca23..21c8478 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -57,9 +57,8 @@
They can be enabled individually. The full list of the parameters: ::
make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
- OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \
- READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \
- HOSTLD=ld.lld
+ OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \
+ HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld
Currently, the integrated assembler is disabled by default. You can pass
``LLVM_IAS=1`` to enable it.
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 2153776..4b9ed58 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -254,6 +254,32 @@
minimum, your changes should survive an ``allyesconfig`` and an
``allmodconfig`` build without new warnings or failures.
+Q: How do I post corresponding changes to user space components?
+----------------------------------------------------------------
+A: User space code exercising kernel features should be posted
+alongside kernel patches. This gives reviewers a chance to see
+how any new interface is used and how well it works.
+
+When user space tools reside in the kernel repo itself, all changes
+should generally come as one series. If the series becomes too large,
+or the user space project is not reviewed on netdev, include a link
+to a public repo where the user space patches can be seen.
+
+In case user space tooling lives in a separate repository but is
+reviewed on netdev (e.g. patches to `iproute2` tools), kernel and
+user space patches should form separate series (threads) when posted
+to the mailing list, e.g.::
+
+ [PATCH net-next 0/3] net: some feature cover letter
+ └─ [PATCH net-next 1/3] net: some feature prep
+ └─ [PATCH net-next 2/3] net: some feature do it
+ └─ [PATCH net-next 3/3] selftest: net: some feature
+
+ [PATCH iproute2-next] ip: add support for some feature
+
+Posting as one thread is discouraged because it confuses patchwork
+(as of patchwork 2.2.2).
+
Q: Any other tips to help ensure my net/net-next patch gets OK'd?
-----------------------------------------------------------------
A: Attention to detail. Re-read your own work as if you were the
diff --git a/MAINTAINERS b/MAINTAINERS
index a008b70..281de21 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1486,10 +1486,20 @@
F: drivers/iommu/arm/
F: drivers/iommu/io-pgtable-arm*
+ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
+M: Arnd Bergmann <arnd@arndb.de>
+M: Olof Johansson <olof@lixom.net>
+M: soc@kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
+F: arch/arm/boot/dts/Makefile
+F: arch/arm64/boot/dts/Makefile
+
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/mach-*/
F: arch/arm/plat-*/
@@ -1724,11 +1734,13 @@
ARM/CORESIGHT FRAMEWORK AND DRIVERS
M: Mathieu Poirier <mathieu.poirier@linaro.org>
-R: Suzuki K Poulose <suzuki.poulose@arm.com>
+M: Suzuki K Poulose <suzuki.poulose@arm.com>
R: Mike Leach <mike.leach@linaro.org>
+R: Leo Yan <leo.yan@linaro.org>
L: coresight@lists.linaro.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/coresight/linux.git
F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
F: Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
F: Documentation/devicetree/bindings/arm/coresight-cti.yaml
@@ -1995,7 +2007,6 @@
ARM/LPC32XX SOC SUPPORT
M: Vladimir Zapolskiy <vz@mleia.com>
-M: Sylvain Lemieux <slemieux.tyco@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://github.com/vzapolskiy/linux-lpc32xx.git
@@ -2013,7 +2024,6 @@
S: Maintained
ARM/Marvell Dove/MV78xx0/Orion SOC support
-M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
M: Gregory Clement <gregory.clement@bootlin.com>
@@ -2030,7 +2040,6 @@
F: drivers/soc/dove/
ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K, CN9130 SOC support
-M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
M: Gregory Clement <gregory.clement@bootlin.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -3238,7 +3247,7 @@
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
R: John Fastabend <john.fastabend@gmail.com>
-R: KP Singh <kpsingh@chromium.org>
+R: KP Singh <kpsingh@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
@@ -3356,6 +3365,17 @@
F: arch/x86/net/
X: arch/x86/net/bpf_jit_comp32.c
+BPF LSM (Security Audit and Enforcement using BPF)
+M: KP Singh <kpsingh@kernel.org>
+R: Florent Revest <revest@chromium.org>
+R: Brendan Jackman <jackmanb@chromium.org>
+L: bpf@vger.kernel.org
+S: Maintained
+F: Documentation/bpf/bpf_lsm.rst
+F: include/linux/bpf_lsm.h
+F: kernel/bpf/bpf_lsm.c
+F: security/bpf/
+
BROADCOM B44 10/100 ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
@@ -3528,11 +3548,12 @@
M: Arend van Spriel <arend.vanspriel@broadcom.com>
M: Franky Lin <franky.lin@broadcom.com>
M: Hante Meuleman <hante.meuleman@broadcom.com>
-M: Chi-Hsien Lin <chi-hsien.lin@cypress.com>
-M: Wright Feng <wright.feng@cypress.com>
+M: Chi-hsien Lin <chi-hsien.lin@infineon.com>
+M: Wright Feng <wright.feng@infineon.com>
+M: Chung-hsien Hsu <chung-hsien.hsu@infineon.com>
L: linux-wireless@vger.kernel.org
L: brcm80211-dev-list.pdl@broadcom.com
-L: brcm80211-dev-list@cypress.com
+L: SHA-cyfmac-dev-list@infineon.com
S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
@@ -4274,6 +4295,7 @@
C: irc://chat.freenode.net/clangbuiltlinux
F: Documentation/kbuild/llvm.rst
F: scripts/clang-tools/
+F: scripts/lld-version.sh
K: \b(?i:clang|llvm)\b
CLEANCACHE API
@@ -9067,10 +9089,7 @@
F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
-M: Johannes Berg <johannes.berg@intel.com>
-M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Luca Coelho <luciano.coelho@intel.com>
-M: Intel Linux Wireless <linuxwifi@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
@@ -9246,7 +9265,6 @@
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>
-M: Jason Cooper <jason@lakedaemon.net>
M: Marc Zyngier <maz@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -9646,6 +9664,7 @@
F: arch/s390/include/asm/gmap.h
F: arch/s390/include/asm/kvm*
F: arch/s390/include/uapi/asm/kvm*
+F: arch/s390/kernel/uv.c
F: arch/s390/kvm/
F: arch/s390/mm/gmap.c
F: tools/testing/selftests/kvm/*/s390x/
@@ -10544,6 +10563,13 @@
F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
F: drivers/net/ethernet/marvell/octeontx2/af/
+MARVELL PRESTERA ETHERNET SWITCH DRIVER
+M: Vadym Kochan <vkochan@marvell.com>
+M: Taras Chornyi <tchornyi@marvell.com>
+S: Supported
+W: https://github.com/Marvell-switching/switchdev-prestera
+F: drivers/net/ethernet/marvell/prestera/
+
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
@@ -13161,7 +13187,9 @@
M: Ilias Apalodimas <ilias.apalodimas@linaro.org>
L: netdev@vger.kernel.org
S: Supported
+F: Documentation/networking/page_pool.rst
F: include/net/page_pool.h
+F: include/trace/events/page_pool.h
F: net/core/page_pool.c
PANASONIC LAPTOP ACPI EXTRAS DRIVER
@@ -13389,7 +13417,6 @@
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
-M: Jason Cooper <jason@lakedaemon.net>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -14803,7 +14830,7 @@
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
-M: Yan-Hsuan Chuang <yhchuang@realtek.com>
+M: Yan-Hsuan Chuang <tony0620emma@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/realtek/rtw88/
@@ -15776,9 +15803,8 @@
F: include/linux/slimbus.h
SFC NETWORK DRIVER
-M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Edward Cree <ecree@solarflare.com>
-M: Martin Habets <mhabets@solarflare.com>
+M: Edward Cree <ecree.xilinx@gmail.com>
+M: Martin Habets <habetsm.xilinx@gmail.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/sfc/
@@ -19110,12 +19136,17 @@
L: bpf@vger.kernel.org
S: Supported
F: include/net/xdp.h
+F: include/net/xdp_priv.h
F: include/trace/events/xdp.h
F: kernel/bpf/cpumap.c
F: kernel/bpf/devmap.c
F: net/core/xdp.c
-N: xdp
-K: xdp
+F: samples/bpf/xdp*
+F: tools/testing/selftests/bpf/*xdp*
+F: tools/testing/selftests/bpf/*/*xdp*
+F: drivers/net/ethernet/*/*/*/*/*xdp*
+F: drivers/net/ethernet/*/*/*xdp*
+K: (?:\b|_)xdp(?:\b|_)
XDP SOCKETS (AF_XDP)
M: Björn Töpel <bjorn.topel@intel.com>
@@ -19124,9 +19155,12 @@
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
+F: Documentation/networking/af_xdp.rst
F: include/net/xdp_sock*
F: include/net/xsk_buff_pool.h
F: include/uapi/linux/if_xdp.h
+F: include/uapi/linux/xdp_diag.h
+F: include/net/netns/xdp.h
F: net/xdp/
F: samples/bpf/xdpsock*
F: tools/lib/bpf/xsk*
diff --git a/Makefile b/Makefile
index ed081e3..9ec53d9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -433,7 +433,6 @@
OBJCOPY = llvm-objcopy
OBJDUMP = llvm-objdump
READELF = llvm-readelf
-OBJSIZE = llvm-size
STRIP = llvm-strip
else
CC = $(CROSS_COMPILE)gcc
@@ -443,7 +442,6 @@
OBJCOPY = $(CROSS_COMPILE)objcopy
OBJDUMP = $(CROSS_COMPILE)objdump
READELF = $(CROSS_COMPILE)readelf
-OBJSIZE = $(CROSS_COMPILE)size
STRIP = $(CROSS_COMPILE)strip
endif
PAHOLE = pahole
@@ -509,7 +507,7 @@
CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
+export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
@@ -828,7 +826,9 @@
DEBUG_CFLAGS += -g
endif
+ifneq ($(LLVM_IAS),1)
KBUILD_AFLAGS += -Wa,-gdwarf-2
+endif
ifdef CONFIG_DEBUG_INFO_DWARF4
DEBUG_CFLAGS += -gdwarf-4
@@ -946,7 +946,7 @@
KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# change __FILE__ to the relative path from the srctree
-KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
# ensure -fcf-protection is disabled when using retpoline as it is
# incompatible with -mindirect-branch=thunk-extern
@@ -984,6 +984,12 @@
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+ifdef CONFIG_LD_ORPHAN_WARN
+LDFLAGS_vmlinux += --orphan-handling=warn
+endif
+
# Align the bit size of userspace programs with the kernel
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
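One user-visible effect of the ``-fmacro-prefix-map`` hunk above is the path
``__FILE__`` expands to; moving the flag to KBUILD_CPPFLAGS presumably extends
that to preprocessed assembly sources as well. A stand-alone sketch (ordinary
user-space C, not kernel code) that can be built with and without
``-fmacro-prefix-map=$(pwd)/=`` to compare::

    #include <stdio.h>

    int main(void)
    {
            /*
             * With the prefix map the compiler rewrites the leading path, so
             * this prints a path relative to the mapped prefix; without it,
             * it prints whatever path was handed to the compiler, possibly
             * an absolute one.
             */
            printf("__FILE__ = %s\n", __FILE__);
            return 0;
    }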
diff --git a/arch/Kconfig b/arch/Kconfig
index 56b6ccc..ba4e966 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1028,6 +1028,15 @@
bool
depends on HAVE_STATIC_CALL
+config ARCH_WANT_LD_ORPHAN_WARN
+ bool
+ help
+ An arch should select this symbol once all linker sections are explicitly
+ included, size-asserted, or discarded in the linker scripts. This is
+ important because we never want expected sections to be placed heuristically
+ by the linker, since the locations of such sections can change between linker
+ versions.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 7462a79..4c7b041 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -57,7 +57,7 @@
void arch_cpu_idle(void)
{
wtint(0);
- local_irq_enable();
+ raw_local_irq_enable();
}
void arch_cpu_idle_dead(void)
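The same local_irq_enable() to raw_local_irq_enable() substitution appears in
the arm arch_cpu_idle() hunk further down. The common shape of these idle
callbacks, as a sketch (kernel-context pseudocode; wait_for_interrupt() is an
illustrative placeholder, and the tracing rationale is an assumption rather
than something stated in the patch)::

    void arch_cpu_idle(void)
    {
            /* Entered by the generic idle loop with interrupts disabled. */
            wait_for_interrupt();   /* wtint(0) on alpha, wfi-style on arm */

            /*
             * Re-enable interrupts on the way out.  The raw_ variant skips
             * the irq-flags tracing hooks, presumably to avoid instrumenting
             * this deep in the idle path.
             */
            raw_local_irq_enable();
    }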
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index c6606f4..fb98440 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -243,10 +243,8 @@
x <<= 2;
r -= 2;
}
- if (!(x & 0x80000000u)) {
- x <<= 1;
+ if (!(x & 0x80000000u))
r -= 1;
- }
return r;
}
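The hunk above drops a dead ``x <<= 1`` from the last step of ARC's
constant-folding helper; as a stand-alone illustration (not the kernel's exact
code), the binary-search software fls() it implements is roughly::

    #include <stdint.h>

    /* Returns the 1-based index of the most significant set bit, 0 if none. */
    static int soft_fls(uint32_t x)
    {
            int r = 32;

            if (!x)
                    return 0;
            if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
            if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
            if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
            if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
            if (!(x & 0x80000000u)) r -= 1; /* shifting x here would be dead code */
            return r;
    }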
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index f1ed17e..1636417 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -134,8 +134,10 @@
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/**************************************************************************
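MAX_POSSIBLE_PHYSMEM_BITS advertises the widest physical address a PTE can
carry on this configuration (40 bits with PAE40, 32 otherwise). A sketch of how
a generic consumer such as zsmalloc might size a packed PFN-plus-index handle
from it (the macro names here are illustrative assumptions, not the real
zsmalloc ones)::

    #define PAGE_SHIFT      12
    #define BITS_PER_LONG   32      /* example: a 32-bit build */
    #define OBJ_TAG_BITS    1

    /* Bits needed to represent any valid page frame number. */
    #define PFN_BITS        (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)

    /* Whatever is left in an unsigned long can index objects within a page. */
    #define OBJ_INDEX_BITS  (BITS_PER_LONG - PFN_BITS - OBJ_TAG_BITS)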
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index b23986f..f73da20 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -38,15 +38,27 @@
#ifdef CONFIG_ARC_DW2_UNWIND
-static void seed_unwind_frame_info(struct task_struct *tsk,
- struct pt_regs *regs,
- struct unwind_frame_info *frame_info)
+static int
+seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
{
- /*
- * synchronous unwinding (e.g. dump_stack)
- * - uses current values of SP and friends
- */
- if (tsk == NULL && regs == NULL) {
+ if (regs) {
+ /*
+ * Asynchronous unwinding of intr/exception
+ * - Just uses the pt_regs passed
+ */
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ } else if (tsk == NULL || tsk == current) {
+ /*
+ * synchronous unwinding (e.g. dump_stack)
+ * - uses current values of SP and friends
+ */
unsigned long fp, sp, blink, ret;
frame_info->task = current;
@@ -63,13 +75,17 @@
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
- } else if (regs == NULL) {
+ } else {
/*
- * Asynchronous unwinding of sleeping task
- * - Gets SP etc from task's pt_regs (saved bottom of kernel
- * mode stack of task)
+ * Asynchronous unwinding of a likely sleeping task
+ * - first ensure it is actually sleeping
+ * - if so, it will be in __switch_to, kernel mode SP of task
+ * is safe-kept and BLINK at a well known location in there
*/
+ if (tsk->state == TASK_RUNNING)
+ return -1;
+
frame_info->task = tsk;
frame_info->regs.r27 = TSK_K_FP(tsk);
@@ -90,19 +106,8 @@
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
- } else {
- /*
- * Asynchronous unwinding of intr/exception
- * - Just uses the pt_regs passed
- */
- frame_info->task = tsk;
-
- frame_info->regs.r27 = regs->fp;
- frame_info->regs.r28 = regs->sp;
- frame_info->regs.r31 = regs->blink;
- frame_info->regs.r63 = regs->ret;
- frame_info->call_frame = 0;
}
+ return 0;
}
#endif
@@ -116,7 +121,8 @@
unsigned int address;
struct unwind_frame_info frame_info;
- seed_unwind_frame_info(tsk, regs, &frame_info);
+ if (seed_unwind_frame_info(tsk, regs, &frame_info))
+ return 0;
while (1) {
address = UNW_PC(&frame_info);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index c340acd..9bb3c24 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -30,14 +30,14 @@
* -Changes related to MMU v2 (Rel 4.8)
*
* Vineetg: Aug 29th 2008
- * -In TLB Flush operations (Metal Fix MMU) there is a explict command to
+ * -In TLB Flush operations (Metal Fix MMU) there is a explicit command to
* flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
* it fails. Thus need to load it with ANY valid value before invoking
* TLBIVUTLB cmd
*
* Vineetg: Aug 21th 2008:
* -Reduced the duration of IRQ lockouts in TLB Flush routines
- * -Multiple copies of TLB erase code seperated into a "single" function
+ * -Multiple copies of TLB erase code separated into a "single" function
* -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
* in interrupt-safe region.
*
@@ -66,7 +66,7 @@
*
* Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
* much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
- * Given this, the thrasing problem should never happen because once the 3
+ * Given this, the thrashing problem should never happen because once the 3
* J-TLB entries are created (even though 3rd will knock out one of the prev
* two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
*
@@ -127,7 +127,7 @@
* There was however an obscure hardware bug, where uTLB flush would
* fail when a prior probe for J-TLB (both totally unrelated) would
* return lkup err - because the entry didn't exist in MMU.
- * The Workround was to set Index reg with some valid value, prior to
+ * The Workaround was to set Index reg with some valid value, prior to
* flush. This was fixed in MMU v3
*/
unsigned int idx;
@@ -272,7 +272,7 @@
}
/*
- * Flush the entrie MM for userland. The fastest way is to move to Next ASID
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
*/
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
@@ -303,7 +303,7 @@
* Difference between this and Kernel Range Flush is
* -Here the fastest way (if range is too large) is to move to next ASID
* without doing any explicit Shootdown
- * -In case of kernel Flush, entry has to be shot down explictly
+ * -In case of kernel Flush, entry has to be shot down explicitly
*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
@@ -620,7 +620,7 @@
* Super Page size is configurable in hardware (4K to 16M), but fixed once
* RTL builds.
*
- * The exact THP size a Linx configuration will support is a function of:
+ * The exact THP size a Linux configuration will support is a function of:
* - MMU page size (typical 8K, RTL fixed)
* - software page walker address split between PGD:PTE:PFN (typical
* 11:8:13, but can be changed with 1 line)
@@ -698,7 +698,7 @@
#endif
-/* Read the Cache Build Confuration Registers, Decode them and save into
+/* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
@@ -803,13 +803,13 @@
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
/*
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
/*
* stack top size sanity check,
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
@@ -881,7 +881,7 @@
* the duplicate one.
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
*/
-volatile int dup_pd_silent; /* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
@@ -948,7 +948,7 @@
/***********************************************************************
* Diagnostic Routines
- * -Called from Low Level TLB Hanlders if things don;t look good
+ * -Called from Low Level TLB Handlers if things don't look good
**********************************************************************/
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fe2f17eb2..002e0cf 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_LD_ORPHAN_WARN
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select BUILDTIME_TABLE_SORT if MMU
select CLONE_BACKWARDS
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4d76eab..e15f76c 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,10 +16,6 @@
KBUILD_LDFLAGS_MODULE += --be8
endif
-# We never want expected sections to be placed heuristically by the
-# linker. All sections should be explicitly named in the linker script.
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
-
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 47f001c..e156741 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -129,7 +129,9 @@
# Delete all temporary local symbols
LDFLAGS_vmlinux += -X
# Report orphan sections
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+ifdef CONFIG_LD_ORPHAN_WARN
+LDFLAGS_vmlinux += --orphan-handling=warn
+endif
# Next argument is a linker script
LDFLAGS_vmlinux += -T
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index c220dc3..243e35f 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -521,7 +521,7 @@
ranges = <0x0 0x100000 0x8000>;
mac_sw: switch@0 {
- compatible = "ti,am4372-cpsw","ti,cpsw-switch";
+ compatible = "ti,am4372-cpsw-switch", "ti,cpsw-switch";
reg = <0x0 0x4000>;
ranges = <0 0 0x4000>;
clocks = <&cpsw_125mhz_gclk>, <&dpll_clksel_mac_clk>;
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index b69c7d4..2f32615 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -32,8 +32,8 @@
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "int0", "int1";
- clocks = <&mcan_clk>, <&l3_iclk_div>;
- clock-names = "cclk", "hclk";
+ clocks = <&l3_iclk_div>, <&mcan_clk>;
+ clock-names = "hclk", "cclk";
bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
index 265f5f3..24f793c 100644
--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
@@ -551,7 +551,7 @@
pinctrl_i2c3: i2c3grp {
fsl,pins = <
- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
index 9390979..b9b698f 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
@@ -166,7 +166,6 @@
MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
- MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
>;
};
diff --git a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
index adde62d..342304f 100644
--- a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
+++ b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
@@ -223,8 +223,7 @@
};
&ssp3 {
- /delete-property/ #address-cells;
- /delete-property/ #size-cells;
+ #address-cells = <0>;
spi-slave;
status = "okay";
ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index bb3987e..0b3d9ae 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -132,7 +132,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <®_gmac_3v3>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
index fce2f7f..bf38c66 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Adam Sampson <ats@offog.org>
+ * Copyright 2015-2020 Adam Sampson <ats@offog.org>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -115,7 +115,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-s3-pinecube.dts b/arch/arm/boot/dts/sun8i-s3-pinecube.dts
index 9bab6b7..4aa0ee8 100644
--- a/arch/arm/boot/dts/sun8i-s3-pinecube.dts
+++ b/arch/arm/boot/dts/sun8i-s3-pinecube.dts
@@ -10,7 +10,7 @@
/ {
model = "PineCube IP Camera";
- compatible = "pine64,pinecube", "allwinner,sun8i-s3";
+ compatible = "pine64,pinecube", "sochip,s3", "allwinner,sun8i-v3";
aliases {
serial0 = &uart2;
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 0c73416..89abd4c 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -539,7 +539,7 @@
gic: interrupt-controller@1c81000 {
compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
- <0x01c82000 0x1000>,
+ <0x01c82000 0x2000>,
<0x01c84000 0x2000>,
<0x01c86000 0x2000>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
index 15c22b0..4795455 100644
--- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
+++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
@@ -120,7 +120,7 @@
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-supply = <®_dc1sw>;
status = "okay";
};
@@ -198,16 +198,16 @@
};
®_dc1sw {
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-name = "vcc-gmac-phy";
};
®_dcdc1 {
regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
};
®_dcdc2 {
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 34793aa..58df9fd 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -81,7 +81,6 @@
CONFIG_BINFMT_MISC=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=m
-CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 3502c2f..baf7d02 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -75,6 +75,8 @@
#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fbb6693..2b85d17 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -25,6 +25,8 @@
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 8e6ace0..9f199b1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -71,7 +71,7 @@
arm_pm_idle();
else
cpu_do_idle();
- local_irq_enable();
+ raw_local_irq_enable();
}
void arch_cpu_idle_prepare(void)
diff --git a/arch/arm/mach-imx/anatop.c b/arch/arm/mach-imx/anatop.c
index d841bed..7bb47eb 100644
--- a/arch/arm/mach-imx/anatop.c
+++ b/arch/arm/mach-imx/anatop.c
@@ -136,7 +136,7 @@
src_np = of_find_compatible_node(NULL, NULL,
"fsl,imx6ul-src");
- src_base = of_iomap(np, 0);
+ src_base = of_iomap(src_np, 0);
of_node_put(src_np);
WARN_ON(!src_base);
sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
diff --git a/arch/arm/mach-keystone/memory.h b/arch/arm/mach-keystone/memory.h
index 9147565..1b9ed12 100644
--- a/arch/arm/mach-keystone/memory.h
+++ b/arch/arm/mach-keystone/memory.h
@@ -6,9 +6,6 @@
#ifndef __MEMORY_H
#define __MEMORY_H
-#define MAX_PHYSMEM_BITS 36
-#define SECTION_SIZE_BITS 34
-
#define KEYSTONE_LOW_PHYS_START 0x80000000ULL
#define KEYSTONE_LOW_PHYS_SIZE 0x80000000ULL /* 2G */
#define KEYSTONE_LOW_PHYS_END (KEYSTONE_LOW_PHYS_START + \
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 144b9ca..a720259 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -288,7 +288,7 @@
.dev_id = "ohci",
.table = {
/* Power GPIO on the I2C-attached TPS65010 */
- GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
GPIO_ACTIVE_HIGH),
},
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 3ee7bdf..3f62a0c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -7,7 +7,6 @@
depends on ARCH_MULTI_V6
select ARCH_OMAP2PLUS
select CPU_V6
- select PM_GENERIC_DOMAINS if PM
select SOC_HAS_OMAP2_SDRC
config ARCH_OMAP3
@@ -106,6 +105,8 @@
select OMAP_DM_TIMER
select OMAP_GPMC
select PINCTRL
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM
select RESET_CONTROLLER
select SOC_BUS
select TI_SYSC
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index a92d277..c8d317f 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -175,8 +175,11 @@
if (mpuss_can_lose_context) {
error = cpu_cluster_pm_enter();
if (error) {
- omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
- goto cpu_cluster_pm_out;
+ index = 0;
+ cx = state_ptr + index;
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ mpuss_can_lose_context = 0;
}
}
}
@@ -184,7 +187,6 @@
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;
-cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 06da274..1963572 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -66,6 +66,7 @@
"allwinner,sun8i-h2-plus",
"allwinner,sun8i-h3",
"allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
"allwinner,sun8i-v3s",
NULL,
};
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1515f6f..a6b5b7e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -81,6 +81,7 @@
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 5789c2d..6a87d59 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -28,10 +28,6 @@
endif
endif
-# We never want expected sections to be placed heuristically by the
-# linker. All sections should be explicitly named in the linker script.
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
-
ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
$(warning LSE atomics not supported by binutils)
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 9ebb9e0..d406974 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -79,7 +79,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <®_dc1sw>;
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index 4f9ba53..9d93fe1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -96,7 +96,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <®_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
index fceb298..29a081e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
@@ -27,7 +27,7 @@
&emac {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ext_rgmii_phy>;
phy-supply = <®_gmac_3v3>;
allwinner,rx-delay-ps = <200>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
index 55259f9..aef8f2b 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
@@ -5,20 +5,20 @@
usb {
compatible = "simple-bus";
dma-ranges;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x68500000 0x00400000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
usbphy0: usb-phy@0 {
compatible = "brcm,sr-usb-combo-phy";
- reg = <0x00000000 0x100>;
+ reg = <0x0 0x00000000 0x0 0x100>;
#phy-cells = <1>;
status = "disabled";
};
xhci0: usb@1000 {
compatible = "generic-xhci";
- reg = <0x00001000 0x1000>;
+ reg = <0x0 0x00001000 0x0 0x1000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0 1>, <&usbphy0 0>;
phy-names = "phy0", "phy1";
@@ -28,7 +28,7 @@
bdc0: usb@2000 {
compatible = "brcm,bdc-v0.16";
- reg = <0x00002000 0x1000>;
+ reg = <0x0 0x00002000 0x0 0x1000>;
interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0 0>, <&usbphy0 1>;
phy-names = "phy0", "phy1";
@@ -38,21 +38,21 @@
usbphy1: usb-phy@10000 {
compatible = "brcm,sr-usb-combo-phy";
- reg = <0x00010000 0x100>;
+ reg = <0x0 0x00010000 0x0 0x100>;
#phy-cells = <1>;
status = "disabled";
};
usbphy2: usb-phy@20000 {
compatible = "brcm,sr-usb-hs-phy";
- reg = <0x00020000 0x100>;
+ reg = <0x0 0x00020000 0x0 0x100>;
#phy-cells = <0>;
status = "disabled";
};
xhci1: usb@11000 {
compatible = "generic-xhci";
- reg = <0x00011000 0x1000>;
+ reg = <0x0 0x00011000 0x0 0x1000>;
interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
phy-names = "phy0", "phy1", "phy2";
@@ -62,7 +62,7 @@
bdc1: usb@21000 {
compatible = "brcm,bdc-v0.16";
- reg = <0x00021000 0x1000>;
+ reg = <0x0 0x00021000 0x0 0x1000>;
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy2>;
phy-names = "phy0";
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 381a84912..c28d51c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -10,18 +10,6 @@
model = "NVIDIA Jetson TX2 Developer Kit";
compatible = "nvidia,p2771-0000", "nvidia,tegra186";
- aconnect {
- status = "okay";
-
- dma-controller@2930000 {
- status = "okay";
- };
-
- interrupt-controller@2a40000 {
- status = "okay";
- };
- };
-
i2c@3160000 {
power-monitor@42 {
compatible = "ti,ina3221";
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
index a2893be..0dc8304 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
@@ -54,7 +54,7 @@
status = "okay";
};
- serial@c280000 {
+ serial@3100000 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index e9c90f0..93438d2 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -1161,7 +1161,7 @@
hsp_aon: hsp@c150000 {
compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
- reg = <0x0c150000 0xa0000>;
+ reg = <0x0c150000 0x90000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index e18e1a9..a9caaf7c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1663,16 +1663,6 @@
vin-supply = <&vdd_5v0_sys>;
};
- vdd_usb_vbus_otg: regulator@11 {
- compatible = "regulator-fixed";
- regulator-name = "USB_VBUS_EN0";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&vdd_5v0_sys>;
- };
-
vdd_hdmi: regulator@10 {
compatible = "regulator-fixed";
regulator-name = "VDD_HDMI_5V0";
@@ -1712,4 +1702,14 @@
enable-active-high;
vin-supply = <&vdd_3v3_sys>;
};
+
+ vdd_usb_vbus_otg: regulator@14 {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_VBUS_EN0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts b/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts
index f6e6a24..b5d9a552 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-sim-vdk.dts
@@ -8,7 +8,7 @@
compatible = "nvidia,tegra234-vdk", "nvidia,tegra234";
aliases {
- sdhci3 = "/cbb@0/sdhci@3460000";
+ mmc3 = "/bus@0/mmc@3460000";
serial0 = &uarta;
};
@@ -17,12 +17,12 @@
stdout-path = "serial0:115200n8";
};
- cbb@0 {
+ bus@0 {
serial@3100000 {
status = "okay";
};
- sdhci@3460000 {
+ mmc@3460000 {
status = "okay";
bus-width = <8>;
non-removable;
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index a94dac76..59e0cbf 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -179,22 +179,22 @@
};
soc: soc {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0 0xffffffff>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x0 0xffffffff>;
dma-ranges;
compatible = "simple-bus";
prng: qrng@e1000 {
compatible = "qcom,prng-ee";
- reg = <0xe3000 0x1000>;
+ reg = <0x0 0xe3000 0x0 0x1000>;
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
};
cryptobam: dma@704000 {
compatible = "qcom,bam-v1.7.0";
- reg = <0x00704000 0x20000>;
+ reg = <0x0 0x00704000 0x0 0x20000>;
interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_CRYPTO_AHB_CLK>;
clock-names = "bam_clk";
@@ -206,7 +206,7 @@
crypto: crypto@73a000 {
compatible = "qcom,crypto-v5.1";
- reg = <0x0073a000 0x6000>;
+ reg = <0x0 0x0073a000 0x0 0x6000>;
clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
<&gcc GCC_CRYPTO_AXI_CLK>,
<&gcc GCC_CRYPTO_CLK>;
@@ -217,7 +217,7 @@
tlmm: pinctrl@1000000 {
compatible = "qcom,ipq6018-pinctrl";
- reg = <0x01000000 0x300000>;
+ reg = <0x0 0x01000000 0x0 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
#gpio-cells = <2>;
@@ -235,7 +235,7 @@
gcc: gcc@1800000 {
compatible = "qcom,gcc-ipq6018";
- reg = <0x01800000 0x80000>;
+ reg = <0x0 0x01800000 0x0 0x80000>;
clocks = <&xo>, <&sleep_clk>;
clock-names = "xo", "sleep_clk";
#clock-cells = <1>;
@@ -244,17 +244,17 @@
tcsr_mutex_regs: syscon@1905000 {
compatible = "syscon";
- reg = <0x01905000 0x8000>;
+ reg = <0x0 0x01905000 0x0 0x8000>;
};
tcsr_q6: syscon@1945000 {
compatible = "syscon";
- reg = <0x01945000 0xe000>;
+ reg = <0x0 0x01945000 0x0 0xe000>;
};
blsp_dma: dma@7884000 {
compatible = "qcom,bam-v1.7.0";
- reg = <0x07884000 0x2b000>;
+ reg = <0x0 0x07884000 0x0 0x2b000>;
interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "bam_clk";
@@ -264,7 +264,7 @@
blsp1_uart3: serial@78b1000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x078b1000 0x200>;
+ reg = <0x0 0x078b1000 0x0 0x200>;
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
@@ -276,7 +276,7 @@
compatible = "qcom,spi-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b5000 0x600>;
+ reg = <0x0 0x078b5000 0x0 0x600>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
spi-max-frequency = <50000000>;
clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
@@ -291,7 +291,7 @@
compatible = "qcom,spi-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b6000 0x600>;
+ reg = <0x0 0x078b6000 0x0 0x600>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
spi-max-frequency = <50000000>;
clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
@@ -306,7 +306,7 @@
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b6000 0x600>;
+ reg = <0x0 0x078b6000 0x0 0x600>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
@@ -321,7 +321,7 @@
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x078b7000 0x600>;
+ reg = <0x0 0x078b7000 0x0 0x600>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
@@ -336,24 +336,24 @@
compatible = "qcom,msm-qgic2";
interrupt-controller;
#interrupt-cells = <0x3>;
- reg = <0x0b000000 0x1000>, /*GICD*/
- <0x0b002000 0x1000>, /*GICC*/
- <0x0b001000 0x1000>, /*GICH*/
- <0x0b004000 0x1000>; /*GICV*/
+ reg = <0x0 0x0b000000 0x0 0x1000>, /*GICD*/
+ <0x0 0x0b002000 0x0 0x1000>, /*GICC*/
+ <0x0 0x0b001000 0x0 0x1000>, /*GICH*/
+ <0x0 0x0b004000 0x0 0x1000>; /*GICV*/
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
watchdog@b017000 {
compatible = "qcom,kpss-wdt";
interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
- reg = <0x0b017000 0x40>;
+ reg = <0x0 0x0b017000 0x0 0x40>;
clocks = <&sleep_clk>;
timeout-sec = <10>;
};
apcs_glb: mailbox@b111000 {
compatible = "qcom,ipq6018-apcs-apps-global";
- reg = <0x0b111000 0x1000>;
+ reg = <0x0 0x0b111000 0x0 0x1000>;
#clock-cells = <1>;
clocks = <&a53pll>, <&xo>;
clock-names = "pll", "xo";
@@ -362,7 +362,7 @@
a53pll: clock@b116000 {
compatible = "qcom,ipq6018-a53pll";
- reg = <0x0b116000 0x40>;
+ reg = <0x0 0x0b116000 0x0 0x40>;
#clock-cells = <0>;
clocks = <&xo>;
clock-names = "xo";
@@ -377,68 +377,68 @@
};
timer@b120000 {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
compatible = "arm,armv7-timer-mem";
- reg = <0x0b120000 0x1000>;
+ reg = <0x0 0x0b120000 0x0 0x1000>;
clock-frequency = <19200000>;
frame@b120000 {
frame-number = <0>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b121000 0x1000>,
- <0x0b122000 0x1000>;
+ reg = <0x0 0x0b121000 0x0 0x1000>,
+ <0x0 0x0b122000 0x0 0x1000>;
};
frame@b123000 {
frame-number = <1>;
interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb123000 0x1000>;
+ reg = <0x0 0xb123000 0x0 0x1000>;
status = "disabled";
};
frame@b124000 {
frame-number = <2>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b124000 0x1000>;
+ reg = <0x0 0x0b124000 0x0 0x1000>;
status = "disabled";
};
frame@b125000 {
frame-number = <3>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b125000 0x1000>;
+ reg = <0x0 0x0b125000 0x0 0x1000>;
status = "disabled";
};
frame@b126000 {
frame-number = <4>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b126000 0x1000>;
+ reg = <0x0 0x0b126000 0x0 0x1000>;
status = "disabled";
};
frame@b127000 {
frame-number = <5>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b127000 0x1000>;
+ reg = <0x0 0x0b127000 0x0 0x1000>;
status = "disabled";
};
frame@b128000 {
frame-number = <6>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0b128000 0x1000>;
+ reg = <0x0 0x0b128000 0x0 0x1000>;
status = "disabled";
};
};
q6v5_wcss: remoteproc@cd00000 {
compatible = "qcom,ipq8074-wcss-pil";
- reg = <0x0cd00000 0x4040>,
- <0x004ab000 0x20>;
+ reg = <0x0 0x0cd00000 0x0 0x4040>,
+ <0x0 0x004ab000 0x0 0x20>;
reg-names = "qdsp6",
"rmb";
interrupts-extended = <&intc GIC_SPI 325 IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
index 35bd6b9..3376810 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
@@ -243,7 +243,6 @@
interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pmic_int>;
- rockchip,system-power-controller;
wakeup-source;
#clock-cells = <1>;
clock-output-names = "rk808-clkout1", "xin32k";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
index be7a31d..2ee07d1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
@@ -20,7 +20,7 @@
gmac_clk: gmac-clock {
compatible = "fixed-clock";
clock-frequency = <125000000>;
- clock-output-names = "gmac_clk";
+ clock-output-names = "gmac_clkin";
#clock-cells = <0>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index e7a459f..2030907 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -74,14 +74,14 @@
label = "red:diy";
gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
default-state = "off";
- linux,default-trigger = "mmc1";
+ linux,default-trigger = "mmc2";
};
yellow_led: led-2 {
label = "yellow:yellow-led";
gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
default-state = "off";
- linux,default-trigger = "mmc0";
+ linux,default-trigger = "mmc1";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index ada724b..7a9a7ac 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -29,6 +29,9 @@
i2c6 = &i2c6;
i2c7 = &i2c7;
i2c8 = &i2c8;
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index ec213b4..1c26d7b 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -128,6 +128,9 @@
{
unsigned long flags = regs->pstate & DAIF_MASK;
+ if (interrupts_enabled(regs))
+ trace_hardirqs_on();
+
/*
* We can't use local_daif_restore(regs->pstate) here as
* system_has_prio_mask_debugging() won't restore the I bit if it can
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 99b9383..0756191 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,7 +31,12 @@
return esr;
}
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void enter_from_user_mode(void);
+asmlinkage void exit_to_user_mode(void);
+void arm64_enter_nmi(struct pt_regs *regs);
+void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4ff12a7..5628289 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -115,8 +115,6 @@
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_young(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
@@ -124,9 +122,12 @@
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
* remapped as PROT_NONE but are yet to be flushed from the TLB.
+ * Note that we can't make any assumptions based on the state of the access
+ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
+ * TLB.
*/
#define pte_accessible(mm, pte) \
- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
* p??_access_permitted() is true for valid user mappings (subject to the
@@ -164,13 +165,6 @@
return pmd;
}
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
- return pte;
-}
-
static inline pte_t pte_mkwrite(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
@@ -196,6 +190,20 @@
return pte;
}
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ /*
+ * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+ * clear), set the PTE_DIRTY bit.
+ */
+ if (pte_hw_dirty(pte))
+ pte = pte_mkdirty(pte);
+
+ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ return pte;
+}
+
static inline pte_t pte_mkold(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_AF));
@@ -845,12 +853,6 @@
pte = READ_ONCE(*ptep);
do {
old_pte = pte;
- /*
- * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
- * clear), set the PTE_DIRTY bit.
- */
- if (pte_hw_dirty(pte))
- pte = pte_mkdirty(pte);
pte = pte_wrprotect(pte);
pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
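The pgtable.h change above moves the hardware-DBM handling into pte_wrprotect() itself: if the entry is "hardware dirty" (PTE_WRITE/DBM set, PTE_RDONLY clear), the software dirty bit is set before write permission is removed, so callers such as the cmpxchg loop in ptep_set_wrprotect() can no longer lose the dirty state. A minimal user-space model of that bit logic, using made-up bit positions rather than the real arm64 PTE encoding:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit positions only; not the real arm64 layout. */
	#define PTE_WRITE	(1u << 0)	/* doubles as the DBM bit here */
	#define PTE_RDONLY	(1u << 1)
	#define PTE_DIRTY	(1u << 2)	/* software dirty bit */

	static int pte_hw_dirty(uint32_t pte)
	{
		return (pte & (PTE_WRITE | PTE_RDONLY)) == PTE_WRITE;
	}

	static uint32_t pte_wrprotect(uint32_t pte)
	{
		if (pte_hw_dirty(pte))
			pte |= PTE_DIRTY;	/* fold HW dirtiness into SW bit */
		pte &= ~PTE_WRITE;
		pte |= PTE_RDONLY;
		return pte;
	}

	int main(void)
	{
		uint32_t pte = PTE_WRITE;	/* hardware-dirtied, writable entry */

		pte = pte_wrprotect(pte);
		assert(pte & PTE_DIRTY);	/* dirty state survives wrprotect */
		assert(!(pte & PTE_WRITE));
		printf("pte after wrprotect: %#x\n", pte);
		return 0;
	}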
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 4266262..0069467 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -7,6 +7,8 @@
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H
+#include <asm/insn.h>
+
typedef u32 probe_opcode_t;
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 997cf8c..28c85b8 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -193,6 +193,10 @@
/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
u64 pmr_save;
u64 stackframe[2];
+
+ /* Only valid for some EL1 exceptions. */
+ u64 lockdep_hardirqs;
+ u64 exit_rcu;
};
static inline bool in_syscall(struct pt_regs const *regs)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e2ef4c2..801861d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -987,7 +987,7 @@
#define SYS_TFSR_EL1_TF0_SHIFT 0
#define SYS_TFSR_EL1_TF1_SHIFT 1
#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
-#define SYS_TFSR_EL1_TF1 (UK(2) << SYS_TFSR_EL1_TF1_SHIFT)
+#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 43d4c32..70e0a75 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -17,40 +17,164 @@
#include <asm/mmu.h>
#include <asm/sysreg.h>
-static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
+/*
+ * This is intended to match the logic in irqentry_enter(), handling the kernel
+ * mode transitions only.
+ */
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+ regs->exit_rcu = false;
+
+ if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter();
+ trace_hardirqs_off_finish();
+
+ regs->exit_rcu = true;
+ return;
+ }
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter_check_tick();
+ trace_hardirqs_off_finish();
+}
+
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (interrupts_enabled(regs)) {
+ if (regs->exit_rcu) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ rcu_irq_exit();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ return;
+ }
+
+ trace_hardirqs_on();
+ } else {
+ if (regs->exit_rcu)
+ rcu_irq_exit();
+ }
+}
+
+void noinstr arm64_enter_nmi(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ __nmi_enter();
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirq_enter();
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+ ftrace_nmi_enter();
+}
+
+void noinstr arm64_exit_nmi(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ ftrace_nmi_exit();
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ lockdep_hardirq_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ __nmi_exit();
+}
+
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ arm64_enter_nmi(regs);
+ else
+ enter_from_kernel_mode(regs);
+}
+
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ arm64_exit_nmi(regs);
+ else
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
far = untagged_addr(far);
do_mem_abort(far, esr, regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_abort);
-static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_pc);
-static void notrace el1_undef(struct pt_regs *regs)
+static void noinstr el1_undef(struct pt_regs *regs)
{
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_undefinstr(regs);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_undef);
-static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
bad_mode(regs, 0, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_inv);
-static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+{
+ regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_nmi_enter();
+
+ trace_hardirqs_off_finish();
+}
+
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+{
+ bool restore = regs->lockdep_hardirqs;
+
+ if (restore) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ }
+
+ rcu_nmi_exit();
+ if (restore)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -62,18 +186,21 @@
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ arm64_enter_el1_dbg(regs);
do_debug_exception(far, esr, regs);
+ arm64_exit_el1_dbg(regs);
}
-NOKPROBE_SYMBOL(el1_dbg);
-static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
+ enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_ptrauth_fault(regs, esr);
+ local_daif_mask();
+ exit_to_kernel_mode(regs);
}
-NOKPROBE_SYMBOL(el1_fpac);
-asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -106,20 +233,34 @@
el1_inv(regs, esr);
}
}
-NOKPROBE_SYMBOL(el1_sync_handler);
-static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
+asmlinkage void noinstr enter_from_user_mode(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+ trace_hardirqs_off_finish();
+}
+
+asmlinkage void noinstr exit_to_user_mode(void)
+{
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ user_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
far = untagged_addr(far);
do_mem_abort(far, esr, regs);
}
-NOKPROBE_SYMBOL(el0_da);
-static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -131,90 +272,80 @@
if (!is_ttbr0_addr(far))
arm64_apply_bp_hardening();
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs);
}
-NOKPROBE_SYMBOL(el0_ia);
-static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_acc(esr, regs);
}
-NOKPROBE_SYMBOL(el0_fpsimd_acc);
-static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sve_acc(esr, regs);
}
-NOKPROBE_SYMBOL(el0_sve_acc);
-static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_fpsimd_exc(esr, regs);
}
-NOKPROBE_SYMBOL(el0_fpsimd_exc);
-static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sysinstr(esr, regs);
}
-NOKPROBE_SYMBOL(el0_sys);
-static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(far, esr, regs);
}
-NOKPROBE_SYMBOL(el0_pc);
-static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(regs->sp, esr, regs);
}
-NOKPROBE_SYMBOL(el0_sp);
-static void notrace el0_undef(struct pt_regs *regs)
+static void noinstr el0_undef(struct pt_regs *regs)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_undefinstr(regs);
}
-NOKPROBE_SYMBOL(el0_undef);
-static void notrace el0_bti(struct pt_regs *regs)
+static void noinstr el0_bti(struct pt_regs *regs)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_bti(regs);
}
-NOKPROBE_SYMBOL(el0_bti);
-static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
bad_el0_sync(regs, 0, esr);
}
-NOKPROBE_SYMBOL(el0_inv);
-static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
unsigned long far = read_sysreg(far_el1);
@@ -222,30 +353,28 @@
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
- user_exit_irqoff();
+ enter_from_user_mode();
do_debug_exception(far, esr, regs);
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
-NOKPROBE_SYMBOL(el0_dbg);
-static void notrace el0_svc(struct pt_regs *regs)
+static void noinstr el0_svc(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ enter_from_user_mode();
do_el0_svc(regs);
}
-NOKPROBE_SYMBOL(el0_svc);
-static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_ptrauth_fault(regs, esr);
}
-NOKPROBE_SYMBOL(el0_fpac);
-asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -297,27 +426,25 @@
el0_inv(regs, esr);
}
}
-NOKPROBE_SYMBOL(el0_sync_handler);
#ifdef CONFIG_COMPAT
-static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
- user_exit_irqoff();
+ enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
do_cp15instr(esr, regs);
}
-NOKPROBE_SYMBOL(el0_cp15);
-static void notrace el0_svc_compat(struct pt_regs *regs)
+static void noinstr el0_svc_compat(struct pt_regs *regs)
{
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ enter_from_user_mode();
do_el0_svc_compat(regs);
}
-NOKPROBE_SYMBOL(el0_svc_compat);
-asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -360,5 +487,4 @@
el0_inv(regs, esr);
}
}
-NOKPROBE_SYMBOL(el0_sync_compat_handler);
#endif /* CONFIG_COMPAT */
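The converted EL1 handlers above all follow one pattern: account for the kernel-mode entry, re-enable the DAIF bits inherited from the interrupted context, run the instrumentable C handler, then mask DAIF again before the exit accounting. A sketch of what a hypothetical additional EL1 handler would look like against the helpers added in this file (do_new_trap() is invented purely for illustration):

	static void noinstr el1_new_trap(struct pt_regs *regs, unsigned long esr)
	{
		enter_from_kernel_mode(regs);	/* lockdep/RCU/tracing entry */
		local_daif_inherit(regs);	/* restore interrupted DAIF state */
		do_new_trap(regs, esr);		/* hypothetical C handler */
		local_daif_mask();		/* back to all-masked for exit */
		exit_to_kernel_mode(regs);	/* lockdep/RCU/tracing exit */
	}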
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b295fb9..d72c818 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,18 +30,18 @@
#include <asm/unistd.h>
/*
- * Context tracking subsystem. Used to instrument transitions
- * between user and kernel mode.
+ * Context tracking and irqflag tracing need to instrument transitions between
+ * user and kernel mode.
*/
- .macro ct_user_exit_irqoff
-#ifdef CONFIG_CONTEXT_TRACKING
+ .macro user_exit_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl enter_from_user_mode
#endif
.endm
- .macro ct_user_enter
-#ifdef CONFIG_CONTEXT_TRACKING
- bl context_tracking_user_enter
+ .macro user_enter_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+ bl exit_to_user_mode
#endif
.endm
@@ -298,9 +298,6 @@
alternative_else_nop_endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
- .if \el == 0
- ct_user_enter
- .endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
@@ -637,16 +634,8 @@
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- test_irqs_unmasked res=x0, pmr=x20
- cbz x0, 1f
- bl asm_nmi_enter
-1:
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ mov x0, sp
+ bl enter_el1_irq_or_nmi
irq_handler
@@ -665,26 +654,8 @@
1:
#endif
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- /*
- * When using IRQ priority masking, we can get spurious interrupts while
- * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
- * section with interrupts disabled. Skip tracing in those cases.
- */
- test_irqs_unmasked res=x0, pmr=x20
- cbz x0, 1f
- bl asm_nmi_exit
-1:
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- test_irqs_unmasked res=x0, pmr=x20
- cbnz x0, 1f
-#endif
- bl trace_hardirqs_on
-1:
-#endif
+ mov x0, sp
+ bl exit_el1_irq_or_nmi
kernel_exit 1
SYM_CODE_END(el1_irq)
@@ -726,21 +697,14 @@
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
- ct_user_exit_irqoff
+ user_exit_irqoff
enable_da_f
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
-
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-#endif
b ret_to_user
SYM_CODE_END(el0_irq)
@@ -759,7 +723,7 @@
el0_error_naked:
mrs x25, esr_el1
gic_prio_kentry_setup tmp=x2
- ct_user_exit_irqoff
+ user_exit_irqoff
enable_dbg
mov x0, sp
mov x1, x25
@@ -774,13 +738,17 @@
SYM_CODE_START_LOCAL(ret_to_user)
disable_daif
gic_prio_kentry_setup tmp=x3
- ldr x1, [tsk, #TSK_TI_FLAGS]
- and x2, x1, #_TIF_WORK_MASK
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
+ ldr x19, [tsk, #TSK_TI_FLAGS]
+ and x2, x19, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
+ user_enter_irqoff
/* Ignore asynchronous tag check faults in the uaccess routines */
clear_mte_async_tcf
- enable_step_tsk x1, x2
+ enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase
#endif
@@ -791,11 +759,9 @@
*/
work_pending:
mov x0, sp // 'regs'
+ mov x1, x19
bl do_notify_resume
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on // enabled while in userspace
-#endif
- ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
+ ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
SYM_CODE_END(ret_to_user)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 9cf2fb8..60456a6 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -67,18 +67,3 @@
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
}
-
-/*
- * Stubs to make nmi_enter/exit() code callable from ASM
- */
-asmlinkage void notrace asm_nmi_enter(void)
-{
- nmi_enter();
-}
-NOKPROBE_SYMBOL(asm_nmi_enter);
-
-asmlinkage void notrace asm_nmi_exit(void)
-{
- nmi_exit();
-}
-NOKPROBE_SYMBOL(asm_nmi_exit);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a47a40e..ed919f6 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -72,13 +72,13 @@
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-static void __cpu_do_idle(void)
+static void noinstr __cpu_do_idle(void)
{
dsb(sy);
wfi();
}
-static void __cpu_do_idle_irqprio(void)
+static void noinstr __cpu_do_idle_irqprio(void)
{
unsigned long pmr;
unsigned long daif_bits;
@@ -108,7 +108,7 @@
* ensure that interrupts are not masked at the PMR (because the core will
* not wake up if we block the wake up signal in the interrupt controller).
*/
-void cpu_do_idle(void)
+void noinstr cpu_do_idle(void)
{
if (system_uses_irq_prio_masking())
__cpu_do_idle_irqprio();
@@ -119,14 +119,14 @@
/*
* This is our default idle handler.
*/
-void arch_cpu_idle(void)
+void noinstr arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
* tricks
*/
cpu_do_idle();
- local_irq_enable();
+ raw_local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 7689f20..793c46d 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -10,6 +10,7 @@
#include <linux/uaccess.h>
#include <asm/alternative.h>
+#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
@@ -223,16 +224,16 @@
}
-asmlinkage __kprobes notrace unsigned long
+asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
unsigned long ret;
- nmi_enter();
+ arm64_enter_nmi(regs);
ret = _sdei_handler(regs, arg);
- nmi_exit();
+ arm64_exit_nmi(regs);
return ret;
}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index e4c0dad..f8f758e 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -121,7 +121,6 @@
cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
- user_exit();
if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
/*
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8af4e0e..2059d8f 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -34,6 +34,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
@@ -753,8 +754,10 @@
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
+ arm64_enter_nmi(regs);
+
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
@@ -786,7 +789,7 @@
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
-asmlinkage void handle_bad_stack(struct pt_regs *regs)
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
@@ -794,6 +797,8 @@
unsigned int esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
+ arm64_enter_nmi(regs);
+
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
@@ -865,24 +870,17 @@
}
}
-asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
{
- nmi_enter();
+ arm64_enter_nmi(regs);
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
- nmi_exit();
+ arm64_exit_nmi(regs);
}
-asmlinkage void enter_from_user_mode(void)
-{
- CT_WARN_ON(ct_state() != CONTEXT_USER);
- user_exit_irqoff();
-}
-NOKPROBE_SYMBOL(enter_from_user_mode);
-
/* GENERIC_BUG traps */
int is_valid_bugaddr(unsigned long addr)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index bb2d986..a797aba 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -13,6 +13,11 @@
SECTIONS {
HYP_SECTION(.text)
+ /*
+ * .hyp..data..percpu needs to be page aligned to maintain the same
+ * alignment for when linking into vmlinux.
+ */
+ . = ALIGN(PAGE_SIZE);
HYP_SECTION_NAME(.data..percpu) : {
PERCPU_INPUT(L1_CACHE_BYTES)
}
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 52d6f24..15a6c98 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -273,6 +273,23 @@
return extract_bytes(value, addr & 7, len);
}
+static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ int target_vcpu_id = vcpu->vcpu_id;
+ u64 value;
+
+ value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+ value |= ((target_vcpu_id & 0xffff) << 8);
+
+ if (vgic_has_its(vcpu->kvm))
+ value |= GICR_TYPER_PLPIS;
+
+ /* reporting of the Last bit is not supported for userspace */
+ return extract_bytes(value, addr & 7, len);
+}
+
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
@@ -593,8 +610,9 @@
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit),
- REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
- vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
+ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
+ vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1ee9400..795d224 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -789,25 +789,6 @@
*/
static void debug_exception_enter(struct pt_regs *regs)
{
- /*
- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
- * already disabled to preserve the last enabled/disabled addresses.
- */
- if (interrupts_enabled(regs))
- trace_hardirqs_off();
-
- if (user_mode(regs)) {
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- } else {
- /*
- * We might have interrupted pretty much anything. In
- * fact, if we're a debug exception, we can even interrupt
- * NMI processing. We don't want this code makes in_nmi()
- * to return true, but we need to notify RCU.
- */
- rcu_nmi_enter();
- }
-
preempt_disable();
/* This code is a bit fragile. Test it. */
@@ -818,12 +799,6 @@
static void debug_exception_exit(struct pt_regs *regs)
{
preempt_enable_no_resched();
-
- if (!user_mode(regs))
- rcu_nmi_exit();
-
- if (interrupts_enabled(regs))
- trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index f730869..69af6bc8 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -102,6 +102,6 @@
#ifdef CONFIG_CPU_PM_STOP
asm volatile("stop\n");
#endif
- local_irq_enable();
+ raw_local_irq_enable();
}
#endif
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index aea0a40..bc1364d 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -57,7 +57,7 @@
*/
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
__asm__("sleep");
}
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 5a0a95d..67767c5 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -44,7 +44,7 @@
{
__vmwait();
/* interrupts wake us up, but irqs are still disabled */
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6b61a70..c9ff879 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -239,7 +239,7 @@
if (mark_idle)
(*mark_idle)(1);
- safe_halt();
+ raw_safe_halt();
if (mark_idle)
(*mark_idle)(0);
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a9e46e5..f998607 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -149,5 +149,5 @@
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
}
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index a950fc1..6c0532d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -154,6 +154,7 @@
#if defined(CONFIG_XPA)
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -169,6 +170,7 @@
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -183,6 +185,7 @@
#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
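The three MAX_POSSIBLE_PHYSMEM_BITS definitions above advertise how wide a physical address can be for each 32-bit MIPS PTE layout (40 bits with XPA, 36 bits with a 64-bit phys_addr_t on MIPS32, 32 bits otherwise), so that generic code which packs PFNs into fixed-width fields — zsmalloc is the usual consumer — can size those fields correctly. A minimal sketch of the arithmetic, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages assumed for this example */

	/* Bits needed to store a PFN for a given physical address width. */
	static unsigned int pfn_bits(unsigned int physmem_bits)
	{
		return physmem_bits - PAGE_SHIFT;
	}

	int main(void)
	{
		unsigned int cfgs[] = { 40, 36, 32 };	/* XPA, 64-bit phys, plain */

		for (unsigned int i = 0; i < 3; i++)
			printf("%u-bit phys -> %u-bit PFN\n",
			       cfgs[i], pfn_bits(cfgs[i]));
		return 0;
	}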
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 5bc3b04..18e69eb 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -33,19 +33,19 @@
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
- local_irq_enable();
+ raw_local_irq_enable();
}
static void __cpuidle r39xx_wait(void)
{
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
- local_irq_enable();
+ raw_local_irq_enable();
}
void __cpuidle r4k_wait(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
__r4k_wait();
}
@@ -64,7 +64,7 @@
" .set arch=r4000 \n"
" wait \n"
" .set pop \n");
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
@@ -84,7 +84,7 @@
" wait \n"
" mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n");
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
@@ -257,7 +257,7 @@
if (cpu_wait)
cpu_wait();
else
- local_irq_enable();
+ raw_local_irq_enable();
}
#ifdef CONFIG_CPU_IDLE
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 4ffe857..50b4eb1 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -33,7 +33,7 @@
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 0ff391f..3c98728 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -79,7 +79,7 @@
*/
void arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
if (mfspr(SPR_UPR) & SPR_UPR_PMP)
mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
}
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index f196d96..a92a23d6a 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -169,7 +169,7 @@
void __cpuidle arch_cpu_idle(void)
{
- local_irq_enable();
+ raw_local_irq_enable();
/* nop on real hardware, qemu will idle sleep. */
asm volatile("or %%r10,%%r10,%%r10\n":::);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e9f13fe..5181872 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -152,6 +152,7 @@
select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a4d56f0..5c8c062 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -123,7 +123,6 @@
LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
-LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)
ifdef CONFIG_PPC64
ifeq ($(call cc-option-yn,-mcmodel=medium),y)
@@ -248,7 +247,6 @@
cpu-as-$(CONFIG_40x) += -Wa,-m405
cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-cpu-as-$(CONFIG_E200) += -Wa,-me200
cpu-as-$(CONFIG_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 36443cd..1376be9 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -36,8 +36,10 @@
*/
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 28716e2..a39e2d1 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -63,6 +63,8 @@
#else /* !__ASSEMBLY__ */
+#include <linux/jump_label.h>
+
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#ifdef CONFIG_PPC_KUAP
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index e0b5294..7509184 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -242,6 +242,18 @@
static inline void radix_init_pseries(void) { };
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+#define arch_clear_mm_cpumask_cpu(cpu, mm) \
+ do { \
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \
+ atomic_dec(&(mm)->context.active_cpus); \
+ cpumask_clear_cpu(cpu, mm_cpumask(mm)); \
+ } \
+ } while (0)
+
+void cleanup_cpu_mmu_context(void);
+#endif
+
static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
int index = ea >> MAX_EA_BITS_PER_CONTEXT;
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index ee2243ba..96522f7 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -153,8 +153,10 @@
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f63a3d3..4d01f09 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1000,8 +1000,6 @@
* Vectors for the FWNMI option. Share common code.
*/
TRAMP_REAL_BEGIN(system_reset_fwnmi)
- /* XXX: fwnmi guest could run a nested/PR guest, so why no test? */
- __IKVM_REAL(system_reset)=0
GEN_INT_ENTRY system_reset, virt=0
#endif /* CONFIG_PPC_PSERIES */
@@ -1412,6 +1410,11 @@
* If none is found, do a Linux page fault. Linux page faults can happen in
* kernel mode due to user copy operations of course.
*
+ * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
+ * MMU context, which may cause a DSI in the host, which must go to the
+ * KVM handler. MSR[IR] is not enabled, so the real-mode handler will
+ * always be used regardless of AIL setting.
+ *
* - Radix MMU
* The hardware loads from the Linux page table directly, so a fault goes
* immediately to Linux page fault.
@@ -1422,10 +1425,8 @@
IVEC=0x300
IDAR=1
IDSISR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
-#endif
INT_DEFINE_END(data_access)
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
@@ -1464,6 +1465,8 @@
* ppc64_bolted_size (first segment). The kernel handler must avoid stomping
* on user-handler data structures.
*
+ * KVM: Same as 0x300, DSLB must test for KVM guest.
+ *
* A dedicated save area EXSLB is used (XXX: but it actually need not be
* these days, we could use EXGEN).
*/
@@ -1472,10 +1475,8 @@
IAREA=PACA_EXSLB
IRECONCILE=0
IDAR=1
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_SKIP=1
IKVM_REAL=1
-#endif
INT_DEFINE_END(data_access_slb)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 2aa16d5..a0dda2a 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -156,6 +156,7 @@
bl initial_bats
bl load_segment_registers
BEGIN_MMU_FTR_SECTION
+ bl reloc_offset
bl early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
@@ -920,7 +921,7 @@
ori r6, r6, 3 /* 256kB table */
mtspr SPRN_SDR1, r6
lis r6, early_hash@h
- lis r3, Hash@ha
+ addis r3, r3, Hash@ha
stw r6, Hash@l(r3)
blr
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index ae0e263..1f83553 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -52,9 +52,9 @@
* interrupts enabled, some don't.
*/
if (irqs_disabled())
- local_irq_enable();
+ raw_local_irq_enable();
} else {
- local_irq_enable();
+ raw_local_irq_enable();
/*
* Go into low thread priority and possibly
* low power mode.
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 85215e7..a0ebc29 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1214,12 +1214,9 @@
static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
/* We have a block of xive->nr_servers VPs. We just need to check
- * raw vCPU ids are below the expected limit for this guest's
- * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
- * index that can be safely used to compute a VP id that belongs
- * to the VP block.
+ * packed vCPU ids are below that.
*/
- return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
+ return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index d0c2db0..a59a94f 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -251,6 +251,13 @@
}
state = &sb->irq_state[src];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_devel("%s: source %lx invalid !\n", __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
kvmppc_xive_select_irq(state, &hw_num, &xd);
arch_spin_lock(&sb->lock);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5e14798..55b4a8b 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,7 +5,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o \
+obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
init-common.o mmu_context.o drmem.o
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 0203cdf..52e170b 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -68,7 +68,7 @@
rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
- : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
+ : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
: "memory");
}
@@ -92,16 +92,15 @@
asm volatile("ptesync": : :"memory");
/*
- * Flush the first set of the TLB, and any caching of partition table
- * entries. Then flush the remaining sets of the TLB. Hash mode uses
- * partition scoped TLB translations.
+ * Flush the partition table cache if this is HV mode.
*/
- tlbiel_hash_set_isa300(0, is, 0, 2, 0);
- for (set = 1; set < num_sets; set++)
- tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+ if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ tlbiel_hash_set_isa300(0, is, 0, 2, 0);
/*
- * Now invalidate the process table cache.
+ * Now invalidate the process table cache. UPRT=0 HPT modes (what
+ * current hardware implements) do not use the process table, but
+ * add the flushes anyway.
*
* From ISA v3.0B p. 1078:
* The following forms are invalid.
@@ -110,6 +109,14 @@
*/
tlbiel_hash_set_isa300(0, is, 0, 2, 1);
+ /*
+ * Then flush the sets of the TLB proper. Hash mode uses
+ * partition scoped TLB translations, which may be flushed
+ * in !HV mode.
+ */
+ for (set = 0; set < num_sets; set++)
+ tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+
ppc_after_tlbiel_barrier();
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 1c54821d..0c85572 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@@ -307,3 +308,22 @@
isync();
}
#endif
+
+/**
+ * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
+ *
+ * This clears the CPU from mm_cpumask for all processes, and then flushes the
+ * local TLB to ensure TLB coherency in case the CPU is onlined again.
+ *
+ * KVM guest translations are not necessarily flushed here. If KVM started
+ * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+void cleanup_cpu_mmu_context(void)
+{
+ int cpu = smp_processor_id();
+
+ clear_tasks_mm_cpumask(cpu);
+ tlbiel_all();
+}
+#endif
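cleanup_cpu_mmu_context() packages the two offline-time steps — drop the dying CPU from every mm_cpumask, then flush its local TLB — so that each platform's CPU-disable hook only needs a single call; the powermac, powernv and pseries hunks below add exactly that. A rough, purely illustrative sketch of where such a hook would place the call (hypothetical_cpu_disable() is not a real function):

	static int hypothetical_cpu_disable(void)	/* illustrative only */
	{
		int cpu = smp_processor_id();

		set_cpu_online(cpu, false);	/* stop scheduling work here */
		/* ... platform interrupt migration, as in the hunks below ... */

		cleanup_cpu_mmu_context();	/* clear cpu from mm_cpumasks,
						 * then tlbiel_all() */
		return 0;
	}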
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
new file mode 100644
index 0000000..fa9a7a7
--- /dev/null
+++ b/arch/powerpc/mm/maccess.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ return is_kernel_addr((unsigned long)unsafe_src);
+}
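This new hook lets powerpc veto non-kernel source addresses for the fault-tolerant kernel copy path, so probes (ftrace, BPF and friends) cannot be pointed at user addresses through it. The generic caller in mm/maccess.c consults the hook before attempting the copy; roughly (paraphrased, not the exact upstream code):

	/* Paraphrased shape of the generic caller in mm/maccess.c. */
	long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
	{
		if (!copy_from_kernel_nofault_allowed(src, size))
			return -ERANGE;		/* arch vetoed this address range */

		pagefault_disable();
		/* ... word-at-a-time copy with exception-table fixups ... */
		pagefault_enable();
		return 0;
	}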
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 63f61d8..f2bf98b 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -742,8 +742,7 @@
of_node_put(cpu);
}
- if (likely(nid > 0))
- node_set_online(nid);
+ node_set_online(nid);
}
get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 74ebe66..adae2a6 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -911,6 +911,8 @@
mpic_cpu_set_priority(0xf);
+ cleanup_cpu_mmu_context();
+
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 4611523..4426a10 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -211,11 +211,16 @@
add_preferred_console("hvc", 0, NULL);
if (!radix_enabled()) {
+ size_t size = sizeof(struct slb_entry) * mmu_slb_size;
int i;
/* Allocate per cpu area to save old slb contents during MCE */
- for_each_possible_cpu(i)
- paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
+ for_each_possible_cpu(i) {
+ paca_ptrs[i]->mce_faulty_slbs =
+ memblock_alloc_node(size,
+ __alignof__(struct slb_entry),
+ cpu_to_node(i));
+ }
}
}
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 54c4ba4..cbb6781 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -143,6 +143,9 @@
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
+
+ cleanup_cpu_mmu_context();
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index f2837e3..a02012f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -90,6 +90,9 @@
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
+
+ cleanup_cpu_mmu_context();
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 133f6ad..b3ac245 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -458,7 +458,8 @@
return hwirq;
}
- virq = irq_create_mapping(NULL, hwirq);
+ virq = irq_create_mapping_affinity(NULL, hwirq,
+ entry->affinity);
if (!virq) {
pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
index b0ab66e..5b2e79e 100644
--- a/arch/riscv/include/asm/pgtable-32.h
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -14,4 +14,6 @@
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 34
+
#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index 82a5693..134388c 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -4,6 +4,8 @@
#ifndef __ASSEMBLY__
+#include <asm/barrier.h>
+
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 19225ec..dd5f985 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -36,7 +36,7 @@
void arch_cpu_idle(void)
{
wait_for_interrupt();
- local_irq_enable();
+ raw_local_irq_enable();
}
void show_regs(struct pt_regs *regs)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index c424cc6..117f321 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -75,6 +75,7 @@
*cmdline_p = boot_command_line;
early_ioremap_setup();
+ jump_label_init();
parse_early_param();
efi_init();
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index cb8f9e4..0cfd6da 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -44,7 +44,7 @@
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- -Wl,--build-id -Wl,--hash-style=both
+ -Wl,--build-id=sha1 -Wl,--hash-style=both
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld --just-symbols we can then
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2012c1c..483051e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -53,11 +53,11 @@
/* stack_frame offsets */
OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
OFFSET(__SF_GPRS, stack_frame, gprs);
- OFFSET(__SF_EMPTY, stack_frame, empty1);
- OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
- OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
- OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
- OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
+ OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
+ OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
+ OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
+ OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
+ OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
BLANK();
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
BLANK();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5346545..92beb14 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -763,12 +763,7 @@
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
- tmhh %r8,0x300
- jz 1f
TRACE_IRQS_OFF
-1:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
lgr %r2,%r11 # pass pointer to pt_regs
@@ -791,12 +786,7 @@
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lio_work
.Lio_restore:
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
- tm __PT_PSW(%r11),3
- jno 0f
TRACE_IRQS_ON
-0:
-#endif
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lio_exit_kernel
@@ -976,12 +966,7 @@
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
- tmhh %r8,0x300
- jz 1f
TRACE_IRQS_OFF
-1:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
lghi %r3,EXT_INTERRUPT
@@ -1068,6 +1053,7 @@
* %r4
*/
load_fpu_regs:
+ stnsm __SF_EMPTY(%r15),0xfc
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
@@ -1099,6 +1085,7 @@
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
+ ssm __SF_EMPTY(%r15)
BR_EX %r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index f7f1e64..2b85096 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -33,10 +33,10 @@
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
- local_irq_save(flags);
+ raw_local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
/* Account time spent with enabled wait psw loaded as idle time. */
raw_write_seqcount_begin(&idle->seqcount);
@@ -123,7 +123,7 @@
void arch_cpu_idle(void)
{
enabled_wait();
- local_irq_enable();
+ raw_local_irq_enable();
}
void arch_cpu_idle_exit(void)
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 14bd9d5..883bfed 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -129,8 +129,15 @@
.paddr = paddr
};
- if (uv_call(0, (u64)&uvcb))
+ if (uv_call(0, (u64)&uvcb)) {
+ /*
+ * Older firmware uses 107/d as an indication of a non secure
+ * page. Let us emulate the newer variant (no-op).
+ */
+ if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
+ return 0;
return -EINVAL;
+ }
return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6b74b92..425d3d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2312,7 +2312,7 @@
struct kvm_s390_pv_unp unp = {};
r = -EINVAL;
- if (!kvm_s390_pv_is_protected(kvm))
+ if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
break;
r = -EFAULT;
@@ -3564,7 +3564,6 @@
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.sie_block->todpr = 0;
- vcpu->arch.sie_block->cpnc = 0;
}
}
@@ -3582,7 +3581,6 @@
regs->etoken = 0;
regs->etoken_extension = 0;
- regs->diag318 = 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index eb99e2f..f5847f9 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -208,7 +208,6 @@
return -EIO;
}
kvm->arch.gmap->guest_handle = uvcb.guest_handle;
- atomic_set(&kvm->mm->context.is_protected, 1);
return 0;
}
@@ -228,6 +227,8 @@
*rrc = uvcb.header.rrc;
KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
*rc, *rrc);
+ if (!cc)
+ atomic_set(&kvm->mm->context.is_protected, 1);
return cc ? -EINVAL : 0;
}
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index daca7ba..8c0c68e 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -33,7 +33,7 @@
static void __udelay_disabled(unsigned long long usecs)
{
- unsigned long cr0, cr0_new, psw_mask, flags;
+ unsigned long cr0, cr0_new, psw_mask;
struct s390_idle_data idle;
u64 end;
@@ -45,9 +45,8 @@
psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
set_clock_comparator(end);
set_cpu_flag(CIF_IGNORE_IRQ);
- local_irq_save(flags);
psw_idle(&idle, psw_mask);
- local_irq_restore(flags);
+ trace_hardirqs_off();
clear_cpu_flag(CIF_IGNORE_IRQ);
set_clock_comparator(S390_lowcore.clock_comparator);
__ctl_load(cr0, 0, 0);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cfb0017..64795d0 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2690,6 +2690,8 @@
#include <linux/sched/mm.h>
void s390_reset_acc(struct mm_struct *mm)
{
+ if (!mm_is_protected(mm))
+ return;
/*
* we might be called during
* reset: we walk the pages and clear
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 743f257..75217fb 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -103,9 +103,10 @@
{
struct msi_desc *entry = irq_get_msi_desc(data->irq);
struct msi_msg msg = entry->msg;
+ int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
msg.address_lo &= 0xff0000ff;
- msg.address_lo |= (cpumask_first(dest) << 8);
+ msg.address_lo |= (cpu_addr << 8);
pci_write_msi_msg(data->irq, &msg);
return IRQ_SET_MASK_OK;
@@ -238,6 +239,7 @@
unsigned long bit;
struct msi_desc *msi;
struct msi_msg msg;
+ int cpu_addr;
int rc, irq;
zdev->aisb = -1UL;
@@ -287,9 +289,15 @@
handle_percpu_irq);
msg.data = hwirq - bit;
if (irq_delivery == DIRECTED) {
+ if (msi->affinity)
+ cpu = cpumask_first(&msi->affinity->mask);
+ else
+ cpu = 0;
+ cpu_addr = smp_cpu_get_cpu_address(cpu);
+
msg.address_lo = zdev->msi_addr & 0xff0000ff;
- msg.address_lo |= msi->affinity ?
- (cpumask_first(&msi->affinity->mask) << 8) : 0;
+ msg.address_lo |= (cpu_addr << 8);
+
for_each_possible_cpu(cpu) {
airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
}
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0dc0f52..f598149 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
void default_idle(void)
{
set_bl_bit();
- local_irq_enable();
+ raw_local_irq_enable();
/* Isn't this racy ? */
cpu_sleep();
clear_bl_bit();
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 065e2d4..396f46b 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -50,7 +50,7 @@
register unsigned int address = (unsigned int)leon3_irqctrl_regs;
/* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
+ raw_local_irq_enable();
__asm__ __volatile__ (
"wr %%g0, %%asr19\n"
@@ -66,7 +66,7 @@
static void pmc_leon_idle(void)
{
/* Interrupts need to be enabled to not hang the CPU */
- local_irq_enable();
+ raw_local_irq_enable();
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index adfcaea..a023637 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -74,7 +74,7 @@
{
if (sparc_idle)
(*sparc_idle)();
- local_irq_enable();
+ raw_local_irq_enable();
}
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index a75093b..6f8c782 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -62,11 +62,11 @@
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
- local_irq_enable();
+ raw_local_irq_enable();
} else {
unsigned long pstate;
- local_irq_enable();
+ raw_local_irq_enable();
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index 0c0268e..d839956 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -71,7 +71,7 @@
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
- mov 1, %o3
+ mov -1, %o3
clr %o4
andcc %g1, 0x3, %g0
bne,pn %icc, 95f
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 3bed095..9505a7e 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -217,7 +217,7 @@
{
cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
um_idle_sleep();
- local_irq_enable();
+ raw_local_irq_enable();
}
int __cant_sleep(void) {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f6946b8..fbf26e0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -100,6 +100,7 @@
select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_HUGE_PMD_SHARE
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
select BUILDTIME_TABLE_SORT
select CLKEVT_I8253
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 154259f1..1bf2174 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -209,9 +209,6 @@
LDFLAGS_vmlinux += -z max-page-size=0x200000
endif
-# We never want expected sections to be placed heuristically by the
-# linker. All sections should be explicitly named in the linker script.
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index ee24908..40b8fd3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -61,7 +61,9 @@
# Compressed kernel should be built as PIE since it may be loaded at any
# address by the bootloader.
LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+ifdef CONFIG_LD_ORPHAN_WARN
+LDFLAGS_vmlinux += --orphan-handling=warn
+endif
LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
index 954cb27..27826c2 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev-es.c
@@ -32,13 +32,12 @@
*/
static bool insn_has_rep_prefix(struct insn *insn)
{
+ insn_byte_t p;
int i;
insn_get_prefixes(insn);
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- insn_byte_t p = insn->prefixes.bytes[i];
-
+ for_each_insn_prefix(insn, i, p) {
if (p == 0xf2 || p == 0xf3)
return true;
}
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index b47cc42..485c506 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1916,7 +1916,7 @@
* that caused the PEBS record. It's called collision.
* If collision happened, the record will be dropped.
*/
- if (p->status != (1ULL << bit)) {
+ if (pebs_status != (1ULL << bit)) {
for_each_set_bit(i, (unsigned long *)&pebs_status, size)
error[i]++;
continue;
@@ -1940,7 +1940,7 @@
if (error[bit]) {
perf_log_lost_samples(event, error[bit]);
- if (perf_event_account_interrupt(event))
+ if (iregs && perf_event_account_interrupt(event))
x86_pmu_stop(event, 0);
}
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 5c1ae3e..a8c3d28 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -201,6 +201,21 @@
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
+ * and the index is stored in @idx (note that @idx is just a cursor; do not
+ * change it.)
+ * Since prefixes.nbytes can be bigger than 4 if some prefixes
+ * are repeated, it cannot be used for looping over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
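Editor's note: the for_each_insn_prefix() iterator added above bounds the walk by ARRAY_SIZE(insn->prefixes.bytes) and stops at the first zero byte, because prefixes.nbytes can exceed the four-byte array when a prefix is repeated. A minimal sketch of a caller, modelled on the 0x66 check in the uprobes hunk further down (the function name is illustrative, not part of this series):

static bool has_operand_size_override(struct insn *insn)
{
        insn_byte_t p;
        int i;

        /* Prefix bytes must be decoded before they can be iterated. */
        insn_get_prefixes(insn);

        for_each_insn_prefix(insn, i, p) {
                if (p == 0x66)          /* operand-size override prefix */
                        return true;
        }
        return false;
}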
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 324ddd7..7e5f33a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1656,6 +1656,7 @@
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index e039a93..29dd27b 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -88,8 +88,6 @@
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
- trace_hardirqs_on();
-
mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1b98f8c..235f5cd 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -161,7 +161,7 @@
/* UV4/4A only have a revision difference */
case UV4_HUB_PART_NUMBER:
uv_min_hub_revision_id = node_id.s.revision
- + UV4_HUB_REVISION_BASE;
+ + UV4_HUB_REVISION_BASE - 1;
uv_hub_type_set(UV4);
if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE)
uv_hub_type_set(UV4|UV4A);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 581fb72..d41b70f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -739,11 +739,13 @@
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ spectre_v2_user_ibpb = mode;
switch (cmd) {
case SPECTRE_V2_USER_CMD_FORCE:
case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
static_branch_enable(&switch_mm_always_ibpb);
+ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
break;
case SPECTRE_V2_USER_CMD_PRCTL:
case SPECTRE_V2_USER_CMD_AUTO:
@@ -757,8 +759,6 @@
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
-
- spectre_v2_user_ibpb = mode;
}
/*
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 4102b86..32b7099 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1384,8 +1384,10 @@
* When there's any problem use only local no_way_out state.
*/
if (!lmce) {
- if (mce_end(order) < 0)
- no_way_out = worst >= MCE_PANIC_SEVERITY;
+ if (mce_end(order) < 0) {
+ if (!no_way_out)
+ no_way_out = worst >= MCE_PANIC_SEVERITY;
+ }
} else {
/*
* If there was a fatal machine check we should have
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index e5f4ee8..e8b5f1c 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -570,6 +570,8 @@
if (d) {
cpumask_set_cpu(cpu, &d->cpu_mask);
+ if (r->cache.arch_has_per_cpu_cfg)
+ rdt_domain_reconfigure_cdp(r);
return;
}
@@ -923,6 +925,7 @@
r->rid == RDT_RESOURCE_L2CODE) {
r->cache.arch_has_sparse_bitmaps = false;
r->cache.arch_has_empty_bitmaps = false;
+ r->cache.arch_has_per_cpu_cfg = false;
} else if (r->rid == RDT_RESOURCE_MBA) {
r->msr_base = MSR_IA32_MBA_THRTL_BASE;
r->msr_update = mba_wrmsr_intel;
@@ -943,6 +946,7 @@
r->rid == RDT_RESOURCE_L2CODE) {
r->cache.arch_has_sparse_bitmaps = true;
r->cache.arch_has_empty_bitmaps = true;
+ r->cache.arch_has_per_cpu_cfg = true;
} else if (r->rid == RDT_RESOURCE_MBA) {
r->msr_base = MSR_IA32_MBA_BW_BASE;
r->msr_update = mba_wrmsr_amd;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 80fa997..f65d3c0 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -360,6 +360,8 @@
* executing entities
* @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
* @arch_has_empty_bitmaps: True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
+ * level has CPU scope.
*/
struct rdt_cache {
unsigned int cbm_len;
@@ -369,6 +371,7 @@
unsigned int shareable_bits;
bool arch_has_sparse_bitmaps;
bool arch_has_empty_bitmaps;
+ bool arch_has_per_cpu_cfg;
};
/**
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index af323e2..f341842 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -507,6 +507,24 @@
return ret ?: nbytes;
}
+/**
+ * rdtgroup_remove - the helper to remove resource group safely
+ * @rdtgrp: resource group to remove
+ *
+ * On resource group creation via a mkdir, an extra kernfs_node reference is
+ * taken to ensure that the rdtgroup structure remains accessible for the
+ * rdtgroup_kn_unlock() calls where it is removed.
+ *
+ * Drop the extra reference here, then free the rdtgroup structure.
+ *
+ * Return: void
+ */
+static void rdtgroup_remove(struct rdtgroup *rdtgrp)
+{
+ kernfs_put(rdtgrp->kn);
+ kfree(rdtgrp);
+}
+
struct task_move_callback {
struct callback_head work;
struct rdtgroup *rdtgrp;
@@ -529,7 +547,7 @@
(rdtgrp->flags & RDT_DELETED)) {
current->closid = 0;
current->rmid = 0;
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
}
if (unlikely(current->flags & PF_EXITING))
@@ -1769,7 +1787,6 @@
if (IS_ERR(kn_subdir))
return PTR_ERR(kn_subdir);
- kernfs_get(kn_subdir);
ret = rdtgroup_kn_set_ugid(kn_subdir);
if (ret)
return ret;
@@ -1792,7 +1809,6 @@
kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
if (IS_ERR(kn_info))
return PTR_ERR(kn_info);
- kernfs_get(kn_info);
ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
if (ret)
@@ -1813,12 +1829,6 @@
goto out_destroy;
}
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that @rdtgrp->kn is always accessible.
- */
- kernfs_get(kn_info);
-
ret = rdtgroup_kn_set_ugid(kn_info);
if (ret)
goto out_destroy;
@@ -1847,12 +1857,6 @@
if (dest_kn)
*dest_kn = kn;
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that @rdtgrp->kn is always accessible.
- */
- kernfs_get(kn);
-
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@@ -1905,8 +1909,13 @@
r_l = &rdt_resources_all[level];
list_for_each_entry(d, &r_l->domains, list) {
- /* Pick one CPU from each domain instance to update MSR */
- cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ if (r_l->cache.arch_has_per_cpu_cfg)
+ /* Pick all the CPUs in the domain instance */
+ for_each_cpu(cpu, &d->cpu_mask)
+ cpumask_set_cpu(cpu, cpu_mask);
+ else
+ /* Pick one CPU from each domain instance to update MSR */
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
}
cpu = get_cpu();
/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
@@ -2079,8 +2088,7 @@
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
rdtgroup_pseudo_lock_remove(rdtgrp);
kernfs_unbreak_active_protection(kn);
- kernfs_put(rdtgrp->kn);
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
}
@@ -2139,13 +2147,11 @@
&kn_mongrp);
if (ret < 0)
goto out_info;
- kernfs_get(kn_mongrp);
ret = mkdir_mondata_all(rdtgroup_default.kn,
&rdtgroup_default, &kn_mondata);
if (ret < 0)
goto out_mongrp;
- kernfs_get(kn_mondata);
rdtgroup_default.mon.mon_data_kn = kn_mondata;
}
@@ -2357,7 +2363,7 @@
if (atomic_read(&sentry->waitcount) != 0)
sentry->flags = RDT_DELETED;
else
- kfree(sentry);
+ rdtgroup_remove(sentry);
}
}
@@ -2399,7 +2405,7 @@
if (atomic_read(&rdtgrp->waitcount) != 0)
rdtgrp->flags = RDT_DELETED;
else
- kfree(rdtgrp);
+ rdtgroup_remove(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@@ -2499,11 +2505,6 @@
if (IS_ERR(kn))
return PTR_ERR(kn);
- /*
- * This extra ref will be put in kernfs_remove() and guarantees
- * that kn is always accessible.
- */
- kernfs_get(kn);
ret = rdtgroup_kn_set_ugid(kn);
if (ret)
goto out_destroy;
@@ -2838,8 +2839,8 @@
/*
* kernfs_remove() will drop the reference count on "kn" which
* will free it. But we still need it to stick around for the
- * rdtgroup_kn_unlock(kn} call below. Take one extra reference
- * here, which will be dropped inside rdtgroup_kn_unlock().
+ * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
+ * which will be dropped by kernfs_put() in rdtgroup_remove().
*/
kernfs_get(kn);
@@ -2880,6 +2881,7 @@
out_idfree:
free_rmid(rdtgrp->mon.rmid);
out_destroy:
+ kernfs_put(rdtgrp->kn);
kernfs_remove(rdtgrp->kn);
out_free_rgrp:
kfree(rdtgrp);
@@ -2892,7 +2894,7 @@
{
kernfs_remove(rgrp->kn);
free_rmid(rgrp->mon.rmid);
- kfree(rgrp);
+ rdtgroup_remove(rgrp);
}
/*
@@ -3049,11 +3051,6 @@
WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
list_del(&rdtgrp->mon.crdtgrp_list);
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
@@ -3065,11 +3062,6 @@
rdtgrp->flags = RDT_DELETED;
list_del(&rdtgrp->rdtgroup_list);
- /*
- * one extra hold on this, will drop when we kfree(rdtgrp)
- * in rdtgroup_kn_unlock()
- */
- kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
return 0;
}
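Editor's note: the rdtgroup.c changes above consolidate the kernfs reference handling. The extra reference on a group's kernfs_node is taken once when the directory is created and dropped only by the new rdtgroup_remove(), replacing the scattered kernfs_get()/kernfs_put()/kfree() pairs that the hunks delete. A rough sketch of the intended pairing, with hypothetical function names (not the actual mkdir path):

static int example_create_group(struct kernfs_node *parent_kn,
                                struct rdtgroup *rdtgrp)
{
        struct kernfs_node *kn;

        kn = kernfs_create_dir(parent_kn, "example", 0755, rdtgrp);
        if (IS_ERR(kn))
                return PTR_ERR(kn);

        rdtgrp->kn = kn;
        /*
         * One extra reference so the structure stays reachable through
         * rdtgrp->kn until rdtgroup_remove() puts it.
         */
        kernfs_get(kn);
        return 0;
}

static void example_delete_group(struct rdtgroup *rdtgrp)
{
        kernfs_remove(rdtgrp->kn);      /* drops kernfs' own reference */
        rdtgroup_remove(rdtgrp);        /* kernfs_put() + kfree() */
}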
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ba4593a..145a7ac 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -685,7 +685,7 @@
*/
void __cpuidle default_idle(void)
{
- safe_halt();
+ raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
@@ -736,6 +736,8 @@
/*
* AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
* states (local apic timer and TSC stop).
+ *
+ * XXX this function is completely buggered vs RCU and tracing.
*/
static void amd_e400_idle(void)
{
@@ -757,9 +759,9 @@
* The switch back from broadcast mode needs to be called with
* interrupts disabled.
*/
- local_irq_disable();
+ raw_local_irq_disable();
tick_broadcast_exit();
- local_irq_enable();
+ raw_local_irq_enable();
}
/*
@@ -801,9 +803,9 @@
if (!need_resched())
__sti_mwait(0, 0);
else
- local_irq_enable();
+ raw_local_irq_enable();
} else {
- local_irq_enable();
+ raw_local_irq_enable();
}
__current_clr_polling();
}
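Editor's note: the local_irq_enable() -> raw_local_irq_enable() conversions above (sh, sparc, um and the x86 idle routines) all address the same problem: the non-raw helpers call into the irq-flags tracing machinery, which is not safe from the innermost idle path where instrumentation and RCU may not be watching. The shape these idle routines end up with looks roughly like this (function names are illustrative):

static void example_arch_idle(void)
{
        /*
         * Re-enable interrupts without the trace_hardirqs_on() hook;
         * tracing must not run from this context.
         */
        raw_local_irq_enable();
        arch_wait_for_interrupt();      /* hypothetical low-power wait */
}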
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 420be87..ae64f98 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,13 +514,10 @@
if (!tboot_enabled())
return 0;
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || dmar_disabled)
pr_warn("Forcing Intel-IOMMU to enabled\n");
dmar_disabled = 0;
-#ifdef CONFIG_SWIOTLB
- swiotlb = 0;
-#endif
no_iommu = 0;
return 1;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 3fdaa04..138bdb1 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -255,12 +255,13 @@
static bool is_prefix_bad(struct insn *insn)
{
+ insn_byte_t p;
int i;
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, i, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_ES):
case INAT_MAKE_PREFIX(INAT_PFX_CS):
@@ -715,6 +716,7 @@
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
u8 opc1 = OPCODE1(insn);
+ insn_byte_t p;
int i;
switch (opc1) {
@@ -746,8 +748,8 @@
* Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
* No one uses these insns, reject any branch insns with such prefix.
*/
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- if (insn->prefixes.bytes[i] == 0x66)
+ for_each_insn_prefix(insn, i, p) {
+ if (p == 0x66)
return -ENOTSUPP;
}
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 99d118f..814698e 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -40,29 +40,10 @@
* check if there is pending interrupt from
* non-APIC source without intack.
*/
-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
-{
- u8 accept = kvm_apic_accept_pic_intr(v);
-
- if (accept) {
- if (irqchip_split(v->kvm))
- return pending_userspace_extint(v);
- else
- return v->kvm->arch.vpic->output;
- } else
- return 0;
-}
-
-/*
- * check if there is injectable interrupt:
- * when virtual interrupt delivery enabled,
- * interrupt from apic will handled by hardware,
- * we don't need to check it here.
- */
-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
/*
- * FIXME: interrupt.injected represents an interrupt that it's
+ * FIXME: interrupt.injected represents an interrupt whose
* side-effects have already been applied (e.g. bit from IRR
* already moved to ISR). Therefore, it is incorrect to rely
* on interrupt.injected to know if there is a pending
@@ -75,6 +56,23 @@
if (!lapic_in_kernel(v))
return v->arch.interrupt.injected;
+ if (!kvm_apic_accept_pic_intr(v))
+ return 0;
+
+ if (irqchip_split(v->kvm))
+ return pending_userspace_extint(v);
+ else
+ return v->kvm->arch.vpic->output;
+}
+
+/*
+ * check if there is injectable interrupt:
+ * when virtual interrupt delivery enabled,
+ * interrupt from apic will be handled by hardware,
+ * we don't need to check it here.
+ */
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+{
if (kvm_cpu_has_extint(v))
return 1;
@@ -91,20 +89,6 @@
*/
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
- /*
- * FIXME: interrupt.injected represents an interrupt that it's
- * side-effects have already been applied (e.g. bit from IRR
- * already moved to ISR). Therefore, it is incorrect to rely
- * on interrupt.injected to know if there is a pending
- * interrupt in the user-mode LAPIC.
- * This leads to nVMX/nSVM not be able to distinguish
- * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
- * pending interrupt or should re-inject an injected
- * interrupt.
- */
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.injected;
-
if (kvm_cpu_has_extint(v))
return 1;
@@ -118,16 +102,21 @@
*/
static int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
- if (kvm_cpu_has_extint(v)) {
- if (irqchip_split(v->kvm)) {
- int vector = v->arch.pending_external_vector;
-
- v->arch.pending_external_vector = -1;
- return vector;
- } else
- return kvm_pic_read_irq(v->kvm); /* PIC */
- } else
+ if (!kvm_cpu_has_extint(v)) {
+ WARN_ON(!lapic_in_kernel(v));
return -1;
+ }
+
+ if (!lapic_in_kernel(v))
+ return v->arch.interrupt.nr;
+
+ if (irqchip_split(v->kvm)) {
+ int vector = v->arch.pending_external_vector;
+
+ v->arch.pending_external_vector = -1;
+ return vector;
+ } else
+ return kvm_pic_read_irq(v->kvm); /* PIC */
}
/*
@@ -135,13 +124,7 @@
*/
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
- int vector;
-
- if (!lapic_in_kernel(v))
- return v->arch.interrupt.nr;
-
- vector = kvm_cpu_get_extint(v);
-
+ int vector = kvm_cpu_get_extint(v);
if (vector != -1)
return vector; /* PIC */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 105e785..86c33d5 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2465,7 +2465,7 @@
struct kvm_lapic *apic = vcpu->arch.apic;
u32 ppr;
- if (!kvm_apic_hw_enabled(apic))
+ if (!kvm_apic_present(vcpu))
return -1;
__apic_update_ppr(apic, &ppr);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5bb1939..7a6ae9e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3517,7 +3517,7 @@
{
u64 sptes[PT64_ROOT_MAX_LEVEL];
struct rsvd_bits_validate *rsvd_check;
- int root = vcpu->arch.mmu->root_level;
+ int root = vcpu->arch.mmu->shadow_root_level;
int leaf;
int level;
bool reserved = false;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c0b1410..566f4d1 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -642,8 +642,8 @@
* It's safe to read more than we are asked, caller should ensure that
* destination has enough space.
*/
- src_paddr = round_down(src_paddr, 16);
offset = src_paddr & 15;
+ src_paddr = round_down(src_paddr, 16);
sz = round_up(sz + offset, 16);
return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1e81cfe..79b3a56 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1309,8 +1309,10 @@
svm->avic_is_running = true;
svm->msrpm = svm_vcpu_alloc_msrpm();
- if (!svm->msrpm)
+ if (!svm->msrpm) {
+ err = -ENOMEM;
goto error_free_vmcb_page;
+ }
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 078a39d..e545a8a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4051,21 +4051,23 @@
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
+ /*
+ * We can accept userspace's request for interrupt injection
+ * as long as we have a place to store the interrupt number.
+ * The actual injection will happen when the CPU is able to
+ * deliver the interrupt.
+ */
+ if (kvm_cpu_has_extint(vcpu))
+ return false;
+
+ /* Acknowledging ExtINT does not happen if LINT0 is masked. */
return (!lapic_in_kernel(vcpu) ||
kvm_apic_accept_pic_intr(vcpu));
}
-/*
- * if userspace requested an interrupt window, check that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
return kvm_arch_interrupt_allowed(vcpu) &&
- !kvm_cpu_has_interrupt(vcpu) &&
- !kvm_event_needs_reinjection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
}
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 58f7fb9..4229950 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -63,13 +63,12 @@
*/
bool insn_has_rep_prefix(struct insn *insn)
{
+ insn_byte_t p;
int i;
insn_get_prefixes(insn);
- for (i = 0; i < insn->prefixes.nbytes; i++) {
- insn_byte_t p = insn->prefixes.bytes[i];
-
+ for_each_insn_prefix(insn, i, p) {
if (p == 0xf2 || p == 0xf3)
return true;
}
@@ -95,14 +94,15 @@
{
int idx = INAT_SEG_REG_DEFAULT;
int num_overrides = 0, i;
+ insn_byte_t p;
insn_get_prefixes(insn);
/* Look for any segment override prefixes. */
- for (i = 0; i < insn->prefixes.nbytes; i++) {
+ for_each_insn_prefix(insn, i, p) {
insn_attr_t attr;
- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ attr = inat_get_opcode_attribute(p);
switch (attr) {
case INAT_MAKE_PREFIX(INAT_PFX_CS):
idx = INAT_SEG_REG_CS;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bcf5e45..97b7c28 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -144,7 +144,7 @@
static inline unsigned get_max_io_size(struct request_queue *q,
struct bio *bio)
{
- unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+ unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
unsigned max_sectors = sectors;
unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9741d1d..659cdb8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -547,7 +547,10 @@
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
- t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors);
+
+ /* Set non-power-of-2 compatible chunk_sectors boundary */
+ if (b->chunk_sectors)
+ t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
/* Physical block size a multiple of the logical block size? */
if (t->physical_block_size & (t->logical_block_size - 1)) {
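Editor's note: the chunk_sectors stacking change above swaps lcm_not_zero() for gcd(), so an I/O that stays within one chunk of the stacked device can never straddle a chunk boundary of any underlying device, even when the limits are not powers of two. A worked example with made-up limits: for bottom devices reporting chunk_sectors of 168 and 512, the stacked value becomes gcd(168, 512) = 8; every multiple of 168 and every multiple of 512 is also a multiple of 8, so an I/O confined to one 8-sector chunk cannot cross either boundary. The old lcm(168, 512) = 10752 produced a chunk large enough to span many boundaries of both devices.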
diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
index 35abcb1..86f8195 100644
--- a/block/keyslot-manager.c
+++ b/block/keyslot-manager.c
@@ -103,6 +103,13 @@
spin_lock_init(&ksm->idle_slots_lock);
slot_hashtable_size = roundup_pow_of_two(num_slots);
+ /*
+ * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
+ * buckets. This only makes a difference when there is only 1 keyslot.
+ */
+ if (slot_hashtable_size < 2)
+ slot_hashtable_size = 2;
+
ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
sizeof(ksm->slot_hashtable[0]),
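Editor's note: the keyslot-manager change above only matters when a device exposes a single keyslot: roundup_pow_of_two(1) is 1, ilog2(1) is 0, and hash_ptr() requires a non-zero bit count, so the table is clamped to two buckets. A small stand-alone illustration of the bucket-count arithmetic (plain userspace C, not kernel code):

#include <stdio.h>

/* Mirrors roundup_pow_of_two() plus the new minimum of 2 buckets. */
static unsigned int bucket_count(unsigned int num_slots)
{
        unsigned int size = 1;

        while (size < num_slots)
                size <<= 1;
        return size < 2 ? 2 : size;
}

int main(void)
{
        unsigned int slots;

        for (slots = 1; slots <= 4; slots++)
                printf("num_slots=%u -> buckets=%u\n", slots, bucket_count(slots));
        return 0;
}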
diff --git a/drivers/Makefile b/drivers/Makefile
index c0cd1b9..5762280 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -145,6 +145,7 @@
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
+obj-$(CONFIG_VHOST_IOTLB) += vhost/
obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_GREYBUS) += greybus/
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 669392f..6284aff 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -47,27 +47,20 @@
{
struct spk_ldisc_data *ldisc_data;
+ if (tty != speakup_tty)
+ /* Somebody tried to use this line discipline outside speakup */
+ return -ENODEV;
+
if (!tty->ops->write)
return -EOPNOTSUPP;
- mutex_lock(&speakup_tty_mutex);
- if (speakup_tty) {
- mutex_unlock(&speakup_tty_mutex);
- return -EBUSY;
- }
- speakup_tty = tty;
-
ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
- if (!ldisc_data) {
- speakup_tty = NULL;
- mutex_unlock(&speakup_tty_mutex);
+ if (!ldisc_data)
return -ENOMEM;
- }
init_completion(&ldisc_data->completion);
ldisc_data->buf_free = true;
- speakup_tty->disc_data = ldisc_data;
- mutex_unlock(&speakup_tty_mutex);
+ tty->disc_data = ldisc_data;
return 0;
}
@@ -191,9 +184,25 @@
tty_unlock(tty);
+ mutex_lock(&speakup_tty_mutex);
+ speakup_tty = tty;
ret = tty_set_ldisc(tty, N_SPEAKUP);
if (ret)
- pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+ speakup_tty = NULL;
+ mutex_unlock(&speakup_tty_mutex);
+
+ if (!ret)
+ /* Success */
+ return 0;
+
+ pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
+
+ tty_lock(tty);
+ if (tty->ops->close)
+ tty->ops->close(tty, NULL);
+ tty_unlock(tty);
+
+ tty_kclose(tty);
return ret;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9929ff5..770d840 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -44,7 +44,7 @@
* iort_set_fwnode() - Create iort_fwnode and use it to register
* iommu data in the iort_fwnode_list
*
- * @node: IORT table node associated with the IOMMU
+ * @iort_node: IORT table node associated with the IOMMU
* @fwnode: fwnode associated with the IORT node
*
* Returns: 0 on success
@@ -673,7 +673,8 @@
/**
* iort_get_device_domain() - Find MSI domain related to a device
* @dev: The device.
- * @req_id: Requester ID for the device.
+ * @id: Requester ID for the device.
+ * @bus_token: irq domain bus token.
*
* Returns: the MSI domain for this device, NULL otherwise
*/
@@ -1136,7 +1137,7 @@
*
* @dev: device to configure
* @dma_addr: device DMA address result pointer
- * @size: DMA range size result pointer
+ * @dma_size: DMA range size result pointer
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
@@ -1526,6 +1527,7 @@
/**
* iort_add_platform_device() - Allocate a platform device for IORT node
* @node: Pointer to device ACPI IORT node
+ * @ops: Pointer to IORT device config struct
*
* Returns: 0 on success, <0 failure
*/
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index efb088d..92ecf1a 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -227,6 +227,9 @@
u32 sysc_mask, syss_done, rstval;
int syss_offset, error = 0;
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
syss_offset = ddata->offsets[SYSC_SYSSTATUS];
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
@@ -970,9 +973,15 @@
return error;
}
}
- error = sysc_wait_softreset(ddata);
- if (error)
- dev_warn(ddata->dev, "OCP softreset timed out\n");
+ /*
+ * Some modules like i2c and hdq1w have unusable reset status unless
+ * the module reset quirk is enabled. Skip status check on enable.
+ */
+ if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ }
if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
sysc_disable_opt_clocks(ddata);
@@ -1373,17 +1382,17 @@
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
- SYSC_MODULE_QUIRK_HDQ1W),
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
- SYSC_MODULE_QUIRK_HDQ1W),
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
- SYSC_MODULE_QUIRK_I2C),
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
SYSC_MODULE_QUIRK_SGX),
@@ -2880,7 +2889,7 @@
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
- return -EBUSY;
+ return -ENXIO;
return 0;
}
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 3b393cb..3061896 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -5,8 +5,8 @@
depends on ARCH_MXC || COMPILE_TEST
config MXC_CLK_SCU
- tristate "IMX SCU clock"
- depends on ARCH_MXC || COMPILE_TEST
+ tristate
+ depends on ARCH_MXC
depends on IMX_SCU && HAVE_ARM_SMCCC
config CLK_IMX1
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index d900f6b..892e91b 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -55,7 +55,7 @@
u16 sel, g1, r1, g2, r2;
} dual;
};
-} __packed;
+};
#define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
{ .gate = _clk, .reset = _rst, \
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 7831850..8286205 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -236,13 +236,15 @@
if (!handle || !handle->perf_ops)
return -ENODEV;
+#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
if (of_find_property(dev->of_node, "#clock-cells", NULL))
devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+#endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver);
if (ret) {
- dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
__func__, ret);
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 36ec1f7..d989549 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -270,7 +270,7 @@
config EFI_EARLYCON
def_bool y
- depends on SERIAL_EARLYCON && !ARM && !IA64
+ depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
select FONT_SUPPORT
select ARCH_USE_MEMREMAP_PROT
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5e5480a..6c6eec0 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -390,10 +390,10 @@
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
- efivar_ssdt_load();
error = generic_ops_register();
if (error)
goto err_put;
+ efivar_ssdt_load();
platform_device_register_simple("efivars", 0, NULL, 0);
}
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index efb8a66..fd95ede 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -20,12 +20,28 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/hashtable.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+/* Max HashMap Order for PM API feature check (1<<7 = 128) */
+#define PM_API_FEATURE_CHECK_MAX_ORDER 7
+
static bool feature_check_enabled;
-static u32 zynqmp_pm_features[PM_API_MAX];
+static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+
+/**
+ * struct pm_api_feature_data - PM API Feature data
+ * @pm_api_id: PM API Id, used as key to index into hashmap
+ * @feature_status: status of PM API feature: valid, invalid
+ * @hentry: hlist_node that hooks this entry into hashtable
+ */
+struct pm_api_feature_data {
+ u32 pm_api_id;
+ int feature_status;
+ struct hlist_node hentry;
+};
static const struct mfd_cell firmware_devs[] = {
{
@@ -142,29 +158,37 @@
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
u64 smc_arg[2];
+ struct pm_api_feature_data *feature_data;
if (!feature_check_enabled)
return 0;
- /* Return value if feature is already checked */
- if (api_id > ARRAY_SIZE(zynqmp_pm_features))
- return PM_FEATURE_INVALID;
+ /* Check for existing entry in hash table for given api */
+ hash_for_each_possible(pm_api_features_map, feature_data, hentry,
+ api_id) {
+ if (feature_data->pm_api_id == api_id)
+ return feature_data->feature_status;
+ }
- if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
- return zynqmp_pm_features[api_id];
+ /* Add new entry if not present */
+ feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
+ if (!feature_data)
+ return -ENOMEM;
+ feature_data->pm_api_id = api_id;
smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
smc_arg[1] = api_id;
ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
- if (ret) {
- zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
- return PM_FEATURE_INVALID;
- }
+ if (ret)
+ ret = -EOPNOTSUPP;
+ else
+ ret = ret_payload[1];
- zynqmp_pm_features[api_id] = ret_payload[1];
+ feature_data->feature_status = ret;
+ hash_add(pm_api_features_map, &feature_data->hentry, api_id);
- return zynqmp_pm_features[api_id];
+ return ret;
}
/**
@@ -200,9 +224,12 @@
* Make sure to stay in x0 register
*/
u64 smc_arg[4];
+ int ret;
- if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
- return -ENOTSUPP;
+ /* Check if feature is supported or not */
+ ret = zynqmp_pm_feature(pm_api_id);
+ if (ret < 0)
+ return ret;
smc_arg[0] = PM_SIP_SVC | pm_api_id;
smc_arg[1] = ((u64)arg1 << 32) | arg0;
@@ -615,7 +642,7 @@
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
type, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
@@ -1252,9 +1279,17 @@
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
+ struct pm_api_feature_data *feature_data;
+ int i;
+
mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
+ hash_for_each(pm_api_features_map, i, feature_data, hentry) {
+ hash_del(&feature_data->hentry);
+ kfree(feature_data);
+ }
+
return 0;
}
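Editor's note: the zynqmp firmware change above replaces a fixed array indexed by API id with a hashtable cache, so a feature status can be remembered for any 32-bit API id rather than only ids below PM_API_MAX, and the entries are freed again in zynqmp_firmware_remove(). The lookup-or-insert pattern it uses is the standard <linux/hashtable.h> idiom; a stripped-down sketch with shortened names (for illustration only):

#include <linux/hashtable.h>
#include <linux/slab.h>

struct feat_entry {
        u32 api_id;
        int status;
        struct hlist_node hentry;
};

static DEFINE_HASHTABLE(feat_map, 7);   /* 2^7 = 128 buckets */

static int feat_cache_lookup_or_add(u32 api_id, int (*query)(u32))
{
        struct feat_entry *e;

        /* Fast path: return a previously cached status. */
        hash_for_each_possible(feat_map, e, hentry, api_id)
                if (e->api_id == api_id)
                        return e->status;

        e = kmalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->api_id = api_id;
        e->status = query(api_id);      /* e.g. the PM_FEATURE_CHECK call */
        hash_add(feat_map, &e->hentry, api_id);
        return e->status;
}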
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 7cd5a29..5645226 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -142,6 +142,7 @@
tristate "FPGA Device Feature List (DFL) support"
select FPGA_BRIDGE
select FPGA_REGION
+ depends on HAS_IOMEM
help
Device Feature List (DFL) defines a feature list structure that
creates a linked list of feature headers within the MMIO space
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 5bda38e..2bc173c 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -192,6 +192,7 @@
ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
arizona_gpio);
if (ret < 0) {
+ pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
return ret;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 2a9046c..4275c18 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -724,6 +724,8 @@
return err;
}
+ platform_set_drvdata(pdev, gpio);
+
return 0;
}
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index ad61daf..865ab2b 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -598,7 +598,7 @@
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
- continue;
+ break;
sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sprd_eic->base[i]))
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 433e2c3..2f24559 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1197,6 +1197,13 @@
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
+ if (IS_ENABLED(CONFIG_PWM)) {
+ err = mvebu_pwm_probe(pdev, mvchip, id);
+ if (err)
+ return err;
+ }
+
/* Some gpio controllers do not provide irq support */
if (!have_irqs)
return 0;
@@ -1206,7 +1213,8 @@
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
- return -ENODEV;
+ err = -ENODEV;
+ goto err_pwm;
}
err = irq_alloc_domain_generic_chips(
@@ -1254,14 +1262,12 @@
mvchip);
}
- /* Some MVEBU SoCs have simple PWM support for GPIO lines */
- if (IS_ENABLED(CONFIG_PWM))
- return mvebu_pwm_probe(pdev, mvchip, id);
-
return 0;
err_domain:
irq_domain_remove(mvchip->domain);
+err_pwm:
+ pwmchip_remove(&mvchip->mvpwm->chip);
return err;
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 0b5a17a..3521c1d 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -574,7 +574,7 @@
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
int ret;
- ret = pm_runtime_get_sync(chip->parent);
+ ret = pm_runtime_resume_and_get(chip->parent);
if (ret < 0)
return ret;
@@ -942,7 +942,7 @@
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
goto err_pm_dis;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 089ddca..6e3c4d7 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1806,6 +1806,11 @@
*/
void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return;
+#endif
+
pinctrl_gpio_free(gc->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e3783f5..026789b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4852,7 +4852,7 @@
if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
- if (ras && ras->supported)
+ if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
return amdgpu_dpm_baco_enter(adev);
@@ -4871,7 +4871,7 @@
if (ret)
return ret;
- if (ras && ras->supported)
+ if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 9579349..1b56dbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -459,6 +459,7 @@
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
+ struct drm_gem_object *gobj;
int ret;
memset(&bp, 0, sizeof(bp));
@@ -469,17 +470,20 @@
bp.type = ttm_bo_type_sg;
bp.resv = resv;
dma_resv_lock(resv, NULL);
- ret = amdgpu_bo_create(adev, &bp, &bo);
+ ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU,
+ 0, ttm_bo_type_sg, resv, &gobj);
if (ret)
goto error;
+ bo = gem_to_amdgpu_bo(gobj);
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
- return &bo->tbo.base;
+ return gobj;
error:
dma_resv_unlock(resv);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7e8265d..e8c76bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -66,26 +66,12 @@
bp.type = type;
bp.resv = resv;
bp.preferred_domain = initial_domain;
-retry:
bp.flags = flags;
bp.domain = initial_domain;
r = amdgpu_bo_create(adev, &bp, &bo);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- }
-
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
- }
+ if (r)
return r;
- }
+
*obj = &bo->tbo.base;
return 0;
@@ -225,7 +211,7 @@
uint64_t size = args->in.bo_size;
struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
- uint32_t handle;
+ uint32_t handle, initial_domain;
int r;
/* reject invalid gem flags */
@@ -269,9 +255,28 @@
resv = vm->root.base.bo->tbo.base.resv;
}
+retry:
+ initial_domain = (u32)(0xffffffff & args->in.domains);
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
- (u32)(0xffffffff & args->in.domains),
+ initial_domain,
flags, ttm_bo_type_device, resv, &gobj);
+ if (r) {
+ if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
+ }
+ return r;
+ }
+
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 36604d7..3e4892b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -499,6 +499,9 @@
else
size = amdgpu_gmc_get_vbios_fb_size(adev);
+ if (adev->mman.keep_stolen_vga_memory)
+ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
size = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4e36551..82cd8e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1172,7 +1172,7 @@
con->dir, &con->disable_ras_err_cnt_harvest);
}
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1194,7 +1194,6 @@
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1203,7 +1202,7 @@
* it won't be called in resume path, no need to check
* suspend and gpu reset status
*/
- if (!con)
+ if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
return;
amdgpu_ras_debugfs_create_ctrl_node(adev);
@@ -1217,10 +1216,9 @@
amdgpu_ras_debugfs_create(adev, &fs_info);
}
}
-#endif
}
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
@@ -1234,7 +1232,6 @@
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
@@ -1243,7 +1240,6 @@
}
con->dir = NULL;
-#endif
}
/* debugfs end */
@@ -1291,7 +1287,8 @@
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
- amdgpu_ras_debugfs_remove_all(adev);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ amdgpu_ras_debugfs_remove_all(adev);
amdgpu_ras_sysfs_remove_all(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6b8d7bb..ec398ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -607,14 +607,8 @@
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head);
-
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
- struct ras_common_if *head);
-
int amdgpu_ras_error_query(struct amdgpu_device *adev,
struct ras_query_if *info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8039d23..a0248d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -69,10 +69,10 @@
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
- uint64_t size)
+ uint64_t size_in_page)
{
return ttm_range_man_init(&adev->mman.bdev, type,
- false, size >> PAGE_SHIFT);
+ false, size_in_page);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 5eb6328..edbb819 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -67,6 +67,7 @@
unsigned harvest_config;
/* store image width to adjust nb memory state */
unsigned decode_image_width;
+ uint32_t keyselect;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 3579565..55f4b8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3105,6 +3105,8 @@
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 9f39527..2a48505 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -186,7 +186,7 @@
if (err)
goto out;
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
+ err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
if (err)
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 7cf4b11..41800fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -277,15 +277,8 @@
*/
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
- void *ptr;
- uint32_t ucode_len, i;
- uint32_t keysel;
-
- ptr = adev->uvd.inst[0].cpu_addr;
- ptr += 192 + 16;
- memcpy(&ucode_len, ptr, 4);
- ptr += ucode_len;
- memcpy(&keysel, ptr, 4);
+ int i;
+ uint32_t keysel = adev->uvd.keyselect;
WREG32(mmUVD_FW_START, keysel);
@@ -550,6 +543,8 @@
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ void *ptr;
+ uint32_t ucode_len;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
@@ -571,6 +566,13 @@
if (r)
return r;
+ /* Retrieve the firmware validation key */
+ ptr = adev->uvd.inst[0].cpu_addr;
+ ptr += 192 + 16;
+ memcpy(&ucode_len, ptr, 4);
+ ptr += ucode_len;
+ memcpy(&adev->uvd.keyselect, ptr, 4);
+
r = amdgpu_uvd_entity_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index e074f7e..b5f8f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1011,6 +1011,11 @@
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* set the write pointer delay */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -1033,6 +1038,10 @@
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
return 0;
}
@@ -1556,8 +1565,14 @@
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* Restore */
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ ring->wptr = 0;
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
@@ -1565,14 +1580,16 @@
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+ ring->wptr = 0;
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1630,10 +1647,6 @@
{
struct amdgpu_device *adev = ring->adev;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
- lower_32_bits(ring->wptr) | 0x80000000);
-
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 222f1df..8cc51ce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1736,6 +1736,7 @@
}
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1745,6 +1746,7 @@
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
+ dma_buf_put(dmabuf);
return r;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0e71180..0f7749e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1041,7 +1041,7 @@
amdgpu_dm_init_color_mod();
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN) {
+ if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
@@ -1058,9 +1058,6 @@
goto error;
}
- /* Update the actual used number of crtc */
- adev->mode_info.num_crtc = adev->dm.display_indexes_num;
-
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -3251,6 +3248,10 @@
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -3312,8 +3313,6 @@
goto fail;
}
- dm->display_indexes_num = dm->dc->caps.max_streams;
-
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 2f8fee0..6b431db 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -163,8 +163,17 @@
new_clocks->dppclk_khz = 100000;
}
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
- if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ /*
+ * Temporarily ignore the 0 cases for disp and dpp clks.
+ * We may have a new feature that requires 0 clks in the future.
+ */
+ if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
+ new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+ new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
+ if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
update_dppclk = true;
@@ -570,7 +579,7 @@
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
@@ -607,7 +616,7 @@
}
};
-static struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table_gs = {
.entries = {
{
.wm_inst = WM_A,
@@ -681,6 +690,80 @@
}
};
+static struct wm_table ddr4_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 9.09,
+ .sr_enter_plus_exit_time_us = 10.14,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 10.12,
+ .sr_enter_plus_exit_time_us = 11.48,
+ .valid = true,
+ },
+ }
+};
+
+static struct wm_table lpddr4_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 7.32,
+ .sr_enter_plus_exit_time_us = 8.38,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.82,
+ .sr_enter_plus_exit_time_us = 11.196,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.89,
+ .sr_enter_plus_exit_time_us = 11.24,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 9.748,
+ .sr_enter_plus_exit_time_us = 11.102,
+ .valid = true,
+ },
+ }
+};
+
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
@@ -762,6 +845,11 @@
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
enum pp_smu_status status = 0;
+ int is_green_sardine = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
+#endif
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
@@ -802,10 +890,16 @@
if (clk_mgr->periodic_retraining_disabled) {
rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
- rn_bw_params.wm_table = lpddr4_wm_table;
+ if (is_green_sardine)
+ rn_bw_params.wm_table = lpddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = lpddr4_wm_table_rn;
}
} else {
- rn_bw_params.wm_table = ddr4_wm_table;
+ if (is_green_sardine)
+ rn_bw_params.wm_table = ddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fec87a2..5b0cedf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3394,10 +3394,13 @@
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
+ struct fixed31_32 link_bw_kbps;
if (timing->flags.DSC) {
- kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
- kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
+ link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
+ link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
+ kbps = dc_fixpt_ceil(link_bw_kbps);
return kbps;
}
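For context on the dc_link.c change above: multiplying a 100Hz-unit pixel clock by a U6.4 bits-per-pixel value can exceed 32 bits for very high pixel clocks, which is what the switch to the dc_fixpt_* helpers avoids. A minimal userspace sketch, with purely illustrative clock and bpp values (not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative only: an ~2376 MHz (8K-class) pixel clock in 100Hz
	 * units and 12bpp DSC expressed in U6.4 format. */
	uint32_t pix_clk_100hz = 23760000;
	uint32_t bpp_u6_4 = 192;

	/* Old approach: 32-bit multiply first -- the product wraps around. */
	uint32_t bad = pix_clk_100hz * bpp_u6_4;
	bad = bad / 160 + ((bad % 160) ? 1 : 0);

	/* Widened math keeps the high bits, matching the intent of doing the
	 * divide in fixed point and taking dc_fixpt_ceil() of the result. */
	uint64_t good = ((uint64_t)pix_clk_100hz * bpp_u6_4 + 159) / 160;

	printf("32-bit result: %u kbps (wrapped)\n", bad);
	printf("64-bit result: %llu kbps\n", (unsigned long long)good);
	return 0;
}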
diff --git a/drivers/gpu/drm/amd/pm/inc/smu10.h b/drivers/gpu/drm/amd/pm/inc/smu10.h
index b965205..9e837a5 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu10.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu10.h
@@ -136,14 +136,12 @@
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_DEFAULT_BIT 0
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
-#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
-#define WORKLOAD_PPLIB_VIDEO_BIT 3
-#define WORKLOAD_PPLIB_VR_BIT 4
-#define WORKLOAD_PPLIB_COMPUTE_BIT 5
-#define WORKLOAD_PPLIB_CUSTOM_BIT 6
-#define WORKLOAD_PPLIB_COUNT 7
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT 2
+#define WORKLOAD_PPLIB_VR_BIT 3
+#define WORKLOAD_PPLIB_COMPUTE_BIT 4
+#define WORKLOAD_PPLIB_CUSTOM_BIT 5
+#define WORKLOAD_PPLIB_COUNT 6
typedef struct {
/* MP1_EXT_SCRATCH0 */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index 719597c..6606511 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
@@ -24,6 +24,8 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/pci.h>
+
#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
@@ -984,6 +986,8 @@
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+
hwmgr->thermal_controller.ucType =
powerplay_table->sThermalController.ucType;
hwmgr->thermal_controller.ucI2cLine =
@@ -1008,7 +1012,104 @@
ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
PHM_PlatformCaps_ThermalController);
- hwmgr->thermal_controller.use_hw_fan_control = 1;
+ if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
+ const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
+ (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
+
+ if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) {
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
+ return 0;
+ } else {
+ const ATOM_PPLIB_FANTABLE *fan_table =
+ (const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ if (1 <= fan_table->ucFanTableFormat) {
+ hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst =
+ fan_table->ucTHyst;
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin =
+ le16_to_cpu(fan_table->usTMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed =
+ le16_to_cpu(fan_table->usTMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTHigh =
+ le16_to_cpu(fan_table->usTHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table->usPWMMin);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed =
+ le16_to_cpu(fan_table->usPWMMed);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh =
+ le16_to_cpu(fan_table->usPWMHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900;
+ hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000;
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ }
+
+ if (2 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE2 *fan_table2 =
+ (const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table2->usTMax);
+ }
+
+ if (3 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE3 *fan_table3 =
+ (const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode =
+ fan_table3->ucFanControlMode;
+
+ if ((3 == fan_table->ucFanTableFormat) &&
+ (0x67B1 == adev->pdev->device))
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+ 47;
+ else
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+ le16_to_cpu(fan_table3->usFanPWMMax);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity =
+ 4836;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table3->usFanOutputSensitivity);
+ }
+
+ if (6 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE4 *fan_table4 =
+ (const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM =
+ le16_to_cpu(fan_table4->usFanRPMMax);
+ }
+
+ if (7 <= fan_table->ucFanTableFormat) {
+ const ATOM_PPLIB_FANTABLE5 *fan_table5 =
+ (const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+ if (0x67A2 == adev->pdev->device ||
+ 0x67A9 == adev->pdev->device ||
+ 0x67B9 == adev->pdev->device) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_GeminiRegulatorFanControlSupport);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow =
+ le16_to_cpu(fan_table5->usFanCurrentLow);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh =
+ le16_to_cpu(fan_table5->usFanCurrentHigh);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow =
+ le16_to_cpu(fan_table5->usFanRPMLow);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh =
+ le16_to_cpu(fan_table5->usFanRPMHigh);
+ }
+ }
+ }
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index cf60f39..e6f40ee 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1297,15 +1297,9 @@
int pplib_workload = 0;
switch (power_profile) {
- case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
- pplib_workload = WORKLOAD_DEFAULT_BIT;
- break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
- case PP_SMC_POWER_PROFILE_POWERSAVING:
- pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
- break;
case PP_SMC_POWER_PROFILE_VIDEO:
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
break;
@@ -1315,6 +1309,9 @@
case PP_SMC_POWER_PROFILE_COMPUTE:
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
}
return pplib_workload;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 895d89b..cf7c4f0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -217,7 +217,7 @@
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
- WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 2380759..6db96fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1164,7 +1164,12 @@
if (ret)
return ret;
- crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+ /*
+ * crystal_clock_freq div by 4 is required since the fan control
+ * module refers to 25MHz
+ */
+
+ crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4;
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
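The smu_v11_0.c hunk above only changes the reference clock fed into the existing tach-period formula. A small illustrative sketch of the factor-of-four effect, assuming a 100 MHz xclk purely for demonstration (values follow the formula's shape, not the SMU register semantics):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of the kernel formula: reference-clock counts per
 * tachometer window for a target RPM. Values are illustrative. */
static uint64_t tach_period(uint64_t crystal_clock_freq, uint32_t rpm)
{
	return 60ULL * crystal_clock_freq * 10000 / (8 * rpm);
}

int main(void)
{
	/* Assumed xclk for illustration; the fan control block expects a
	 * 25 MHz reference, i.e. xclk / 4 on parts with a 100 MHz xclk. */
	uint64_t xclk = 100;
	uint32_t rpm = 1500;

	printf("without /4: %llu\n", (unsigned long long)tach_period(xclk, rpm));
	printf("with    /4: %llu\n", (unsigned long long)tach_period(xclk / 4, rpm));
	return 0;
}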
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 834a156..0a1e1cf 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -742,7 +742,6 @@
case DRM_MODE_DPMS_SUSPEND:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
- ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
@@ -778,6 +777,21 @@
}
static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+ struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+ struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (old_ast_crtc_state->format != ast_crtc_state->format)
+ ast_crtc_load_lut(ast, crtc);
+}
+
+static void
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -830,6 +844,7 @@
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_check = ast_crtc_helper_atomic_check,
+ .atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 6417f37..951d5f70 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC Exynos Series"
- depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on OF && DRM && COMMON_CLK
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 99e6825..aabf09f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -18021,16 +18021,6 @@
if (!HAS_GMCH(i915))
sanitize_watermarks(i915);
- /*
- * Force all active planes to recompute their states. So that on
- * mode_setcrtc after probe, all the intel_plane_state variables
- * are already calculated and there is no assert_plane warnings
- * during bootup.
- */
- ret = intel_initial_commit(dev);
- if (ret)
- drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
-
return 0;
}
@@ -18039,11 +18029,21 @@
{
int ret;
- intel_overlay_setup(i915);
-
if (!HAS_DISPLAY(i915))
return 0;
+ /*
+ * Force all active planes to recompute their states. So that on
+ * mode_setcrtc after probe, all the intel_plane_state variables
+ * are already calculated and there is no assert_plane warnings
+ * during bootup.
+ */
+ ret = intel_initial_commit(&i915->drm);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+
+ intel_overlay_setup(i915);
+
ret = intel_fbdev_init(&i915->drm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index bf1e9cf1..9bc59fd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -573,7 +573,7 @@
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
+ min_slice_count = max_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1904e6e..b07dc11 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3097,7 +3097,7 @@
break;
}
-static void eb_request_add(struct i915_execbuffer *eb)
+static int eb_request_add(struct i915_execbuffer *eb, int err)
{
struct i915_request *rq = eb->request;
struct intel_timeline * const tl = i915_request_timeline(rq);
@@ -3118,6 +3118,7 @@
/* Serialise with context_close via the add_to_timeline */
i915_request_set_error_once(rq, -ENOENT);
__i915_request_skip(rq);
+ err = -ENOENT; /* override any transient errors */
}
__i915_request_queue(rq, &attr);
@@ -3127,6 +3128,8 @@
retire_requests(tl, prev);
mutex_unlock(&tl->mutex);
+
+ return err;
}
static const i915_user_extension_fn execbuf_extensions[] = {
@@ -3332,7 +3335,7 @@
err = eb_submit(&eb, batch);
err_request:
i915_request_get(eb.request);
- eb_request_add(&eb);
+ err = eb_request_add(&eb, err);
if (eb.fences)
signal_fence_array(&eb);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d8b206e..a24cc1f 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -30,18 +30,21 @@
#include "i915_trace.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
- return;
+ return false;
/* Caller disables interrupts */
spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
spin_unlock(&engine->gt->irq_lock);
+
+ return true;
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&b->irq_lock);
-
- if (!b->irq_engine || b->irq_armed)
- return;
-
- if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+ /*
+ * Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference.
+ */
+ if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
return;
/*
@@ -73,25 +75,24 @@
*/
WRITE_ONCE(b->irq_armed, true);
- /*
- * Since we are waiting on a request, the GPU should be busy
- * and should have its own rpm reference. This is tracked
- * by i915->gt.awake, we can forgo holding our own wakref
- * for the interrupt as before i915->gt.awake is released (when
- * the driver is idle) we disarm the breadcrumbs.
- */
+ /* Requests may have completed before we could enable the interrupt. */
+ if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+ irq_work_queue(&b->irq_work);
+}
- if (!b->irq_enabled++)
- irq_enable(b->irq_engine);
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+{
+ if (!b->irq_engine)
+ return;
+
+ spin_lock(&b->irq_lock);
+ if (!b->irq_armed)
+ __intel_breadcrumbs_arm_irq(b);
+ spin_unlock(&b->irq_lock);
}
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&b->irq_lock);
-
- if (!b->irq_engine || !b->irq_armed)
- return;
-
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
irq_disable(b->irq_engine);
@@ -100,20 +101,37 @@
intel_gt_pm_put_async(b->irq_engine->gt);
}
+static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
+ spin_lock(&b->irq_lock);
+ if (b->irq_armed)
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock(&b->irq_lock);
+}
+
static void add_signaling_context(struct intel_breadcrumbs *b,
struct intel_context *ce)
{
- intel_context_get(ce);
- list_add_tail(&ce->signal_link, &b->signalers);
- if (list_is_first(&ce->signal_link, &b->signalers))
- __intel_breadcrumbs_arm_irq(b);
+ lockdep_assert_held(&ce->signal_lock);
+
+ spin_lock(&b->signalers_lock);
+ list_add_rcu(&ce->signal_link, &b->signalers);
+ spin_unlock(&b->signalers_lock);
}
-static void remove_signaling_context(struct intel_breadcrumbs *b,
+static bool remove_signaling_context(struct intel_breadcrumbs *b,
struct intel_context *ce)
{
- list_del(&ce->signal_link);
- intel_context_put(ce);
+ lockdep_assert_held(&ce->signal_lock);
+
+ if (!list_empty(&ce->signals))
+ return false;
+
+ spin_lock(&b->signalers_lock);
+ list_del_rcu(&ce->signal_link);
+ spin_unlock(&b->signalers_lock);
+
+ return true;
}
static inline bool __request_completed(const struct i915_request *rq)
@@ -174,73 +192,103 @@
intel_engine_add_retire(b->irq_engine, tl);
}
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
{
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
if (!__dma_fence_signal(&rq->fence)) {
i915_request_put(rq);
return false;
}
- list_add_tail(&rq->signal_link, signals);
return true;
}
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+ node->next = head;
+ return node;
+}
+
static void signal_irq_work(struct irq_work *work)
{
struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
- struct intel_context *ce, *cn;
- struct list_head *pos, *next;
- LIST_HEAD(signal);
+ struct llist_node *signal, *sn;
+ struct intel_context *ce;
- spin_lock(&b->irq_lock);
+ signal = NULL;
+ if (unlikely(!llist_empty(&b->signaled_requests)))
+ signal = llist_del_all(&b->signaled_requests);
- if (list_empty(&b->signalers))
- __intel_breadcrumbs_disarm_irq(b);
+ /*
+ * Keep the irq armed until the interrupt after all listeners are gone.
+ *
+ * Enabling/disabling the interrupt is rather costly, roughly a couple
+ * of hundred microseconds. If we are proactive and enable/disable
+ * the interrupt around every request that wants a breadcrumb, we
+ * quickly drown in the extra orders of magnitude of latency imposed
+ * on request submission.
+ *
+ * So we try to be lazy, and keep the interrupts enabled until no
+ * more listeners appear within a breadcrumb interrupt interval (that
+ * is until a request completes that no one cares about). The
+ * observation is that listeners come in batches, and will often
+ * listen to a bunch of requests in succession. Though note on icl+,
+ * interrupts are always enabled due to concerns with rc6 being
+ * dysfunctional with per-engine interrupt masking.
+ *
+ * We also try to avoid raising too many interrupts, as they may
+ * be generated by userspace batches and it is unfortunately rather
+ * too easy to drown the CPU under a flood of GPU interrupts. Thus
+ * whenever no one appears to be listening, we turn off the interrupts.
+ * Fewer interrupts should conserve power -- at the very least, fewer
+ * interrupts draw less ire from other users of the system and tools
+ * like powertop.
+ */
+ if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers))
+ intel_breadcrumbs_disarm_irq(b);
- list_splice_init(&b->signaled_requests, &signal);
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ struct i915_request *rq;
- list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
- GEM_BUG_ON(list_empty(&ce->signals));
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
+ bool release;
- list_for_each_safe(pos, next, &ce->signals) {
- struct i915_request *rq =
- list_entry(pos, typeof(*rq), signal_link);
-
- GEM_BUG_ON(!check_signal_order(ce, rq));
if (!__request_completed(rq))
break;
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags))
+ break;
+
/*
* Queue for execution after dropping the signaling
* spinlock as the callback chain may end up adding
* more signalers to the same context or engine.
*/
- __signal_request(rq, &signal);
- }
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(b, ce);
+ spin_unlock(&ce->signal_lock);
- /*
- * We process the list deletion in bulk, only using a list_add
- * (not list_move) above but keeping the status of
- * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
- */
- if (!list_is_first(pos, &ce->signals)) {
- /* Advance the list to the first incomplete request */
- __list_del_many(&ce->signals, pos);
- if (&ce->signals == pos) { /* now empty */
+ if (__signal_request(rq))
+ /* We own signal_node now, xfer to local list */
+ signal = slist_add(&rq->signal_node, signal);
+
+ if (release) {
add_retire(b, ce->timeline);
- remove_signaling_context(b, ce);
+ intel_context_put(ce);
}
}
}
+ rcu_read_unlock();
- spin_unlock(&b->irq_lock);
-
- list_for_each_safe(pos, next, &signal) {
+ llist_for_each_safe(signal, sn, signal) {
struct i915_request *rq =
- list_entry(pos, typeof(*rq), signal_link);
+ llist_entry(signal, typeof(*rq), signal_node);
struct list_head cb_list;
spin_lock(&rq->lock);
@@ -251,6 +299,9 @@
i915_request_put(rq);
}
+
+ if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+ intel_breadcrumbs_arm_irq(b);
}
struct intel_breadcrumbs *
@@ -262,14 +313,15 @@
if (!b)
return NULL;
- spin_lock_init(&b->irq_lock);
- INIT_LIST_HEAD(&b->signalers);
- INIT_LIST_HEAD(&b->signaled_requests);
-
- init_irq_work(&b->irq_work, signal_irq_work);
-
b->irq_engine = irq_engine;
+ spin_lock_init(&b->signalers_lock);
+ INIT_LIST_HEAD(&b->signalers);
+ init_llist_head(&b->signaled_requests);
+
+ spin_lock_init(&b->irq_lock);
+ init_irq_work(&b->irq_work, signal_irq_work);
+
return b;
}
@@ -292,27 +344,28 @@
void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- unsigned long flags;
-
- if (!READ_ONCE(b->irq_armed))
- return;
-
- spin_lock_irqsave(&b->irq_lock, flags);
- __intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irqrestore(&b->irq_lock, flags);
-
- if (!list_empty(&b->signalers))
- irq_work_queue(&b->irq_work);
+ /* Kick the work once more to drain the signalers */
+ irq_work_sync(&b->irq_work);
+ while (unlikely(READ_ONCE(b->irq_armed))) {
+ local_irq_disable();
+ signal_irq_work(&b->irq_work);
+ local_irq_enable();
+ cond_resched();
+ }
+ GEM_BUG_ON(!list_empty(&b->signalers));
}
void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
{
+ irq_work_sync(&b->irq_work);
+ GEM_BUG_ON(!list_empty(&b->signalers));
+ GEM_BUG_ON(b->irq_armed);
kfree(b);
}
-static void insert_breadcrumb(struct i915_request *rq,
- struct intel_breadcrumbs *b)
+static void insert_breadcrumb(struct i915_request *rq)
{
+ struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
struct intel_context *ce = rq->context;
struct list_head *pos;
@@ -327,12 +380,14 @@
* its signal completion.
*/
if (__request_completed(rq)) {
- if (__signal_request(rq, &b->signaled_requests))
+ if (__signal_request(rq) &&
+ llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
return;
}
if (list_empty(&ce->signals)) {
+ intel_context_get(ce);
add_signaling_context(b, ce);
pos = &ce->signals;
} else {
@@ -358,18 +413,22 @@
break;
}
}
- list_add(&rq->signal_link, pos);
+ list_add_rcu(&rq->signal_link, pos);
GEM_BUG_ON(!check_signal_order(ce, rq));
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- /* Check after attaching to irq, interrupt may have already fired. */
- if (__request_completed(rq))
- irq_work_queue(&b->irq_work);
+ /*
+ * Defer enabling the interrupt to after HW submission and recheck
+ * the request as it may have completed and raised the interrupt as
+ * we were attaching it into the lists.
+ */
+ irq_work_queue(&b->irq_work);
}
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b;
+ struct intel_context *ce = rq->context;
/* Serialises with i915_request_retire() using rq->lock */
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -384,67 +443,30 @@
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
return true;
- /*
- * rq->engine is locked by rq->engine->active.lock. That however
- * is not known until after rq->engine has been dereferenced and
- * the lock acquired. Hence we acquire the lock and then validate
- * that rq->engine still matches the lock we hold for it.
- *
- * Here, we are using the breadcrumb lock as a proxy for the
- * rq->engine->active.lock, and we know that since the breadcrumb
- * will be serialised within i915_request_submit/i915_request_unsubmit,
- * the engine cannot change while active as long as we hold the
- * breadcrumb lock on that engine.
- *
- * From the dma_fence_enable_signaling() path, we are outside of the
- * request submit/unsubmit path, and so we must be more careful to
- * acquire the right lock.
- */
- b = READ_ONCE(rq->engine)->breadcrumbs;
- spin_lock(&b->irq_lock);
- while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) {
- spin_unlock(&b->irq_lock);
- b = READ_ONCE(rq->engine)->breadcrumbs;
- spin_lock(&b->irq_lock);
- }
-
- /*
- * Now that we are finally serialised with request submit/unsubmit,
- * [with b->irq_lock] and with i915_request_retire() [via checking
- * SIGNALED with rq->lock] confirm the request is indeed active. If
- * it is no longer active, the breadcrumb will be attached upon
- * i915_request_submit().
- */
+ spin_lock(&ce->signal_lock);
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- insert_breadcrumb(rq, b);
-
- spin_unlock(&b->irq_lock);
+ insert_breadcrumb(rq);
+ spin_unlock(&ce->signal_lock);
return true;
}
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = rq->engine->breadcrumbs;
+ struct intel_context *ce = rq->context;
+ bool release;
- /*
- * We must wait for b->irq_lock so that we know the interrupt handler
- * has released its reference to the intel_context and has completed
- * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
- * required).
- */
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
- struct intel_context *ce = rq->context;
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ return;
- list_del(&rq->signal_link);
- if (list_empty(&ce->signals))
- remove_signaling_context(b, ce);
+ spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ spin_unlock(&ce->signal_lock);
+ if (release)
+ intel_context_put(ce);
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- i915_request_put(rq);
- }
- spin_unlock(&b->irq_lock);
+ i915_request_put(rq);
}
static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
@@ -454,18 +476,17 @@
drm_printf(p, "Signals:\n");
- spin_lock_irq(&b->irq_lock);
- list_for_each_entry(ce, &b->signalers, signal_link) {
- list_for_each_entry(rq, &ce->signals, signal_link) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
+ list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
- }
}
- spin_unlock_irq(&b->irq_lock);
+ rcu_read_unlock();
}
void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 8e53b99..a74bb30 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -29,18 +29,16 @@
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- spinlock_t irq_lock; /* protects the lists used in hardirq context */
-
/* Not all breadcrumbs are attached to physical HW */
struct intel_engine_cs *irq_engine;
+ spinlock_t signalers_lock; /* protects the list of signalers */
struct list_head signalers;
- struct list_head signaled_requests;
+ struct llist_head signaled_requests;
+ spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
-
unsigned int irq_enabled;
-
bool irq_armed;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 92a3f25..349e7fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -25,9 +25,16 @@
return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
+static void rcu_context_free(struct rcu_head *rcu)
+{
+ struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
+
+ kmem_cache_free(global.slab_ce, ce);
+}
+
void intel_context_free(struct intel_context *ce)
{
- kmem_cache_free(global.slab_ce, ce);
+ call_rcu(&ce->rcu, rcu_context_free);
}
struct intel_context *
@@ -356,8 +363,7 @@
}
void
-intel_context_init(struct intel_context *ce,
- struct intel_engine_cs *engine)
+intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
GEM_BUG_ON(!engine->cops);
GEM_BUG_ON(!engine->gt->vm);
@@ -373,7 +379,8 @@
ce->vm = i915_vm_get(engine->gt->vm);
- INIT_LIST_HEAD(&ce->signal_link);
+ /* NB ce->signal_link/lock is used under RCU */
+ spin_lock_init(&ce->signal_lock);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 552cb57..52fa9c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -25,6 +25,7 @@
struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
+struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;
@@ -44,7 +45,16 @@
};
struct intel_context {
- struct kref ref;
+ /*
+ * Note: Some fields may be accessed under RCU.
+ *
+ * Unless otherwise noted a field can safely be assumed to be protected
+ * by strong reference counting.
+ */
+ union {
+ struct kref ref; /* no kref_get_unless_zero()! */
+ struct rcu_head rcu;
+ };
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
@@ -54,8 +64,15 @@
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
- struct list_head signal_link;
- struct list_head signals;
+ /*
+ * @signal_lock protects the list of requests that need signaling,
+ * @signals. While there are any requests that need signaling,
+ * we add the context to the breadcrumbs worker, and remove it
+ * upon completion/cancellation of the last request.
+ */
+ struct list_head signal_link; /* Accessed under RCU */
+ struct list_head signals; /* Guarded by signal_lock */
+ spinlock_t signal_lock; /* protects signals, the list of requests */
struct i915_vma *state;
struct intel_ring *ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9bb16bdf9..724b2cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -182,6 +182,7 @@
struct virtual_engine {
struct intel_engine_cs base;
struct intel_context context;
+ struct rcu_work rcu;
/*
* We allow only a single request through the virtual engine at a time
@@ -2787,6 +2788,9 @@
static bool execlists_hold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ if (i915_request_on_hold(rq))
+ return false;
+
spin_lock_irq(&engine->active.lock);
if (i915_request_completed(rq)) { /* too late! */
@@ -3168,8 +3172,10 @@
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (unlikely(timeout && preempt_timeout(engine)))
+ if (unlikely(timeout && preempt_timeout(engine))) {
+ cancel_timer(&engine->execlists.preempt);
execlists_reset(engine, "preemption time out");
+ }
}
}
@@ -5425,33 +5431,57 @@
return &ve->base.execlists.default_priolist.requests[0];
}
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
struct virtual_engine *ve =
- container_of(kref, typeof(*ve), context.ref);
+ container_of(wrk, typeof(*ve), rcu.work);
unsigned int n;
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
- GEM_BUG_ON(ve->request);
GEM_BUG_ON(ve->context.inflight);
+ /* Preempt-to-busy may leave a stale request behind. */
+ if (unlikely(ve->request)) {
+ struct i915_request *old;
+
+ spin_lock_irq(&ve->base.active.lock);
+
+ old = fetch_and_zero(&ve->request);
+ if (old) {
+ GEM_BUG_ON(!i915_request_completed(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ spin_unlock_irq(&ve->base.active.lock);
+ }
+
+ /*
+ * Flush the tasklet in case it is still running on another core.
+ *
+ * This needs to be done before we remove ourselves from the siblings'
+ * rbtrees as in the case it is running in parallel, it may reinsert
+ * the rb_node into a sibling.
+ */
+ tasklet_kill(&ve->base.execlists.tasklet);
+
+ /* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
struct rb_node *node = &ve->nodes[sibling->id].rb;
- unsigned long flags;
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irqsave(&sibling->active.lock, flags);
+ spin_lock_irq(&sibling->active.lock);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irqrestore(&sibling->active.lock, flags);
+ spin_unlock_irq(&sibling->active.lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
if (ve->context.state)
__execlists_context_fini(&ve->context);
@@ -5464,6 +5494,27 @@
kfree(ve);
}
+static void virtual_context_destroy(struct kref *kref)
+{
+ struct virtual_engine *ve =
+ container_of(kref, typeof(*ve), context.ref);
+
+ GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+ /*
+ * When destroying the virtual engine, we have to be aware that
+ * it may still be in use from a hardirq/softirq context causing
+ * the resubmission of a completed request (background completion
+ * due to preempt-to-busy). Before we can free the engine, we need
+ * to flush the submission code and tasklets that are still potentially
+ * accessing the engine. Flushing the tasklets requires process context,
+ * and since we can guard the resubmit onto the engine with an RCU read
+ * lock, we can delegate the free of the engine to an RCU worker.
+ */
+ INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+ queue_rcu_work(system_wq, &ve->rcu);
+}
+
static void virtual_engine_initial_hint(struct virtual_engine *ve)
{
int swp;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 313e51e..413dadf 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -59,8 +59,7 @@
#define _L3_CACHEABILITY(value) ((value) << 4)
/* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
-#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
/*
@@ -131,7 +130,19 @@
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
- L3_3_WB)
+ L3_3_WB),
+
+ /*
+ * mocs:63
+ * - used by the L3 for all of its evictions.
+ * Thus it is expected to allow LLC cacheability to enable coherent
+ * flows to be maintained.
+ * - used to force L3 uncachable cycles.
+ * Thus it is expected to make the surface L3 uncacheable.
+ */
+ MOCS_ENTRY(63,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC)
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
@@ -316,11 +327,11 @@
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
- table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index e6a00ee..c1c9cc0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -883,6 +883,10 @@
adj = -2;
rps->last_adj = adj;
rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
+ if (rps->cur_freq < rps->efficient_freq) {
+ rps->cur_freq = rps->efficient_freq;
+ rps->last_adj = 0;
+ }
GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index f011ea4..5982b62 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -73,7 +73,7 @@
mapping_set_unevictable(file->f_mapping);
return vaddr;
err_page:
- while (--i >= 0)
+ while (i--)
put_page(pages[i]);
kvfree(pages);
return NULL;
@@ -103,10 +103,13 @@
return PTR_ERR(page);
vaddr = kmap(page);
- if (write)
+ if (write) {
memcpy(vaddr + offset_in_page(off), ptr, this);
- else
+ set_page_dirty(page);
+ } else {
memcpy(ptr, vaddr + offset_in_page(off), this);
+ }
+ mark_page_accessed(page);
kunmap(page);
put_page(page);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9831361..a81cf0f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -255,7 +255,7 @@
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
-/* This reg has been accessed through GPU commands */
+/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is in GVT's mmio save-restor list and in hardware
* logical context image
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e949769..3640d0e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -909,8 +909,13 @@
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- intel_uncore_write(uncore, oastatus_reg,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+
+ intel_uncore_rmw(uncore, oastatus_reg,
+ GEN8_OASTATUS_COUNTER_OVERFLOW |
+ GEN8_OASTATUS_REPORT_LOST,
+ IS_GEN_RANGE(uncore->i915, 8, 10) ?
+ (GEN8_OASTATUS_HEAD_POINTER_WRAP |
+ GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
return gen8_append_oa_reports(stream, buf, count, offset);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 664f3bf..5cd83ea 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -676,6 +676,8 @@
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
#define GEN8_OASTATUS _MMIO(0x2b08)
+#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17)
+#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16)
#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 16b7210..620b6fa 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -176,7 +176,9 @@
struct intel_context *context;
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
+
struct list_head signal_link;
+ struct llist_node signal_node;
/*
* The rcu epoch of when this request was allocated. Used to judiciously
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 23a6132..412e216 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -211,8 +211,8 @@
return PTR_ERR(obj);
obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
+ if (IS_ERR(obj2)) {
+ err = PTR_ERR(obj2);
goto put1;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index cf11c48..52f11a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -522,15 +522,6 @@
return 0;
}
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
- .destroy = mtk_dpi_encoder_destroy,
-};
-
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4a188a9..65fd99c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -444,7 +444,10 @@
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
+ u32 horizontal_front_back_byte;
+ u32 data_phy_cycles_byte;
u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ u32 delta;
struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -466,50 +469,30 @@
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
+ horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
else
horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
- dsi_tmp_buf_bpp;
+ dsi_tmp_buf_bpp - 10;
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit;
+ timing->da_hs_zero + timing->da_hs_exit + 3;
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
- if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
- data_phy_cycles * dsi->lanes + 18) {
- horizontal_frontporch_byte =
- vm->hfront_porch * dsi_tmp_buf_bpp -
- (data_phy_cycles * dsi->lanes + 18) *
- vm->hfront_porch /
- (vm->hfront_porch + vm->hback_porch);
+ delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
- horizontal_backporch_byte =
- horizontal_backporch_byte -
- (data_phy_cycles * dsi->lanes + 18) *
- vm->hback_porch /
- (vm->hfront_porch + vm->hback_porch);
- } else {
- DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp;
- }
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+ horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+ data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
+
+ if (horizontal_front_back_byte > data_phy_cycles_byte) {
+ horizontal_frontporch_byte -= data_phy_cycles_byte *
+ horizontal_frontporch_byte /
+ horizontal_front_back_byte;
+
+ horizontal_backporch_byte -= data_phy_cycles_byte *
+ horizontal_backporch_byte /
+ horizontal_front_back_byte;
} else {
- if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
- data_phy_cycles * dsi->lanes + 12) {
- horizontal_frontporch_byte =
- vm->hfront_porch * dsi_tmp_buf_bpp -
- (data_phy_cycles * dsi->lanes + 12) *
- vm->hfront_porch /
- (vm->hfront_porch + vm->hback_porch);
- horizontal_backporch_byte = horizontal_backporch_byte -
- (data_phy_cycles * dsi->lanes + 12) *
- vm->hback_porch /
- (vm->hfront_porch + vm->hback_porch);
- } else {
- DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
- horizontal_frontporch_byte = vm->hfront_porch *
- dsi_tmp_buf_bpp;
- }
+ DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
}
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
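The rewritten mtk_dsi.c logic above distributes the D-PHY overhead across HFP and HBP in proportion to their sizes rather than taking it all from the front porch. A standalone sketch with made-up porch and timing values, mirroring only the proportional split:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only: a 4-lane DSI panel at 24bpp (3 bytes/pixel). */
	uint32_t hfp = 40 * 3, hbp = 80 * 3;          /* porch sizes in bytes */
	uint32_t data_phy_cycles = 20 + 3;            /* lpx+prepare+zero+exit + 3 */
	uint32_t lanes = 4, delta = 12;               /* 12 non-burst, 18 burst */
	uint32_t front_back = hfp + hbp;
	uint32_t overhead = data_phy_cycles * lanes + delta;

	if (front_back > overhead) {
		/* Spread the D-PHY overhead across HFP and HBP in proportion,
		 * instead of taking it all out of the front porch. */
		hfp -= overhead * hfp / front_back;
		hbp -= overhead * hbp / front_back;
	}
	printf("hfp=%u bytes, hbp=%u bytes\n", hfp, hbp);
	return 0;
}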
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index b721b8b..9e1224d 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
@@ -484,17 +485,27 @@
writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
}
+static bool mxsfb_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_primary_atomic_update,
};
static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_overlay_atomic_update,
};
static const struct drm_plane_funcs mxsfb_plane_funcs = {
+ .format_mod_supported = mxsfb_format_mod_supported,
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 56b335a..7daa12ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1214,8 +1214,8 @@
}
reg->bus.offset = handle;
- ret = 0;
}
+ ret = 0;
break;
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 549bc67..c205138 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -558,8 +558,10 @@
NV_PRINTK(err, cli, "validating bo list\n");
validate_fini(op, chan, NULL, NULL);
return ret;
+ } else if (ret > 0) {
+ *apply_relocs = true;
}
- *apply_relocs = ret;
+
return 0;
}
@@ -662,7 +664,6 @@
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
- u_free(reloc);
return ret;
}
@@ -872,9 +873,10 @@
break;
}
}
- u_free(reloc);
}
out_prevalid:
+ if (!IS_ERR(reloc))
+ u_free(reloc);
u_free(bo);
u_free(push);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 033fd30..282e4c8 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -195,8 +195,7 @@
sdi->pixelclock = adjusted_mode->clock * 1000;
}
-static void sdi_bridge_enable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+static void sdi_bridge_enable(struct drm_bridge *bridge)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
@@ -259,8 +258,7 @@
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_bridge_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state)
+static void sdi_bridge_disable(struct drm_bridge *bridge)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -278,8 +276,8 @@
.mode_valid = sdi_bridge_mode_valid,
.mode_fixup = sdi_bridge_mode_fixup,
.mode_set = sdi_bridge_mode_set,
- .atomic_enable = sdi_bridge_enable,
- .atomic_disable = sdi_bridge_disable,
+ .enable = sdi_bridge_enable,
+ .disable = sdi_bridge_disable,
};
static void sdi_bridge_init(struct sdi_device *sdi)
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index e95fdfb..ba0b3ea 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -629,7 +629,7 @@
lcd->spi = spi;
mutex_init(&lcd->mutex);
- lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(lcd->reset_gpio)) {
dev_err(&spi->dev, "failed to get reset GPIO\n");
return PTR_ERR(lcd->reset_gpio);
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f292c6a..41edd0a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -544,7 +544,7 @@
struct device_node *port, *endpoint;
int ret = 0, child_count = 0;
const char *name;
- u32 endpoint_id;
+ u32 endpoint_id = 0;
lvds->drm_dev = drm_dev;
port = of_graph_get_port_by_id(dev->of_node, 1);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ba9d1c3..e4baf07 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -90,7 +90,7 @@
if (!fpriv)
return -ENOMEM;
- idr_init(&fpriv->contexts);
+ idr_init_base(&fpriv->contexts, 1);
mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 5a4fd0d..47d26b5 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -129,7 +129,6 @@
if (!output->ddc) {
err = -EPROBE_DEFER;
- of_node_put(ddc);
return err;
}
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e88a17c..cc2aa23 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -397,7 +397,6 @@
struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
- int (*remove)(struct tegra_sor *sor);
void (*audio_enable)(struct tegra_sor *sor);
void (*audio_disable)(struct tegra_sor *sor);
};
@@ -2942,6 +2941,24 @@
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_disable_regulator(void *data)
+{
+ struct regulator *reg = data;
+
+ regulator_disable(reg);
+}
+
+static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg)
+{
+ int err;
+
+ err = regulator_enable(reg);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg);
+}
+
static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
{
int err;
@@ -2953,7 +2970,7 @@
return PTR_ERR(sor->avdd_io_supply);
}
- err = regulator_enable(sor->avdd_io_supply);
+ err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
err);
@@ -2967,7 +2984,7 @@
return PTR_ERR(sor->vdd_pll_supply);
}
- err = regulator_enable(sor->vdd_pll_supply);
+ err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
err);
@@ -2981,7 +2998,7 @@
return PTR_ERR(sor->hdmi_supply);
}
- err = regulator_enable(sor->hdmi_supply);
+ err = tegra_sor_enable_regulator(sor, sor->hdmi_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
return err;
@@ -2992,19 +3009,9 @@
return 0;
}
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.name = "HDMI",
.probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
.audio_enable = tegra_sor_hdmi_audio_enable,
.audio_disable = tegra_sor_hdmi_audio_disable,
};
@@ -3017,7 +3024,7 @@
if (IS_ERR(sor->avdd_io_supply))
return PTR_ERR(sor->avdd_io_supply);
- err = regulator_enable(sor->avdd_io_supply);
+ err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
if (err < 0)
return err;
@@ -3025,25 +3032,16 @@
if (IS_ERR(sor->vdd_pll_supply))
return PTR_ERR(sor->vdd_pll_supply);
- err = regulator_enable(sor->vdd_pll_supply);
+ err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
if (err < 0)
return err;
return 0;
}
-static int tegra_sor_dp_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
static const struct tegra_sor_ops tegra_sor_dp_ops = {
.name = "DP",
.probe = tegra_sor_dp_probe,
- .remove = tegra_sor_dp_remove,
};
static int tegra_sor_init(struct host1x_client *client)
@@ -3145,6 +3143,7 @@
if (err < 0) {
dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
err);
+ clk_disable_unprepare(sor->clk);
return err;
}
@@ -3152,12 +3151,17 @@
}
err = clk_prepare_enable(sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk);
return err;
+ }
err = clk_prepare_enable(sor->clk_dp);
- if (err < 0)
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk);
return err;
+ }
return 0;
}
@@ -3764,17 +3768,16 @@
return err;
err = tegra_output_probe(&sor->output);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to probe output: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to probe output\n");
if (sor->ops && sor->ops->probe) {
err = sor->ops->probe(sor);
if (err < 0) {
dev_err(&pdev->dev, "failed to probe %s: %d\n",
sor->ops->name, err);
- goto output;
+ goto remove;
}
}
@@ -3955,9 +3958,6 @@
rpm_disable:
pm_runtime_disable(&pdev->dev);
remove:
- if (sor->ops && sor->ops->remove)
- sor->ops->remove(sor);
-output:
tegra_output_remove(&sor->output);
return err;
}
@@ -3976,12 +3976,6 @@
pm_runtime_disable(&pdev->dev);
- if (sor->ops && sor->ops->remove) {
- err = sor->ops->remove(sor);
- if (err < 0)
- dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
- }
-
tegra_output_remove(&sor->output);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 19b75be..c5f2944 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -219,6 +219,7 @@
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
+ struct drm_private_obj hvs_channels;
struct drm_private_obj load_tracker;
/* List of vc4_debugfs_info_entry for adding to debugfs once
@@ -531,6 +532,9 @@
unsigned int top;
unsigned int bottom;
} margins;
+
+ /* Transitional state below, only valid during atomic commits */
+ bool update_muxing;
};
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 95779d5..afc178b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -760,12 +760,54 @@
{
}
+#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
+#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL
+
+static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ unsigned long long pixel_rate = mode->clock * 1000;
+ unsigned long long tmds_rate;
+
+ if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2)))
+ return -EINVAL;
+
+ /*
+ * The 1440p@60 pixel rate is in the same range as the first
+ * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
+ * bandwidth). Slightly lower the frequency to bring it out of
+ * the WiFi range.
+ */
+ tmds_rate = pixel_rate * 10;
+ if (vc4_hdmi->disable_wifi_frequencies &&
+ (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
+ tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
+ mode->clock = 238560;
+ pixel_rate = mode->clock * 1000;
+ }
+
+ if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+ return -EINVAL;
+
+ return 0;
+}
+
static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+ (mode->hsync_end % 2) || (mode->htotal % 2)))
+ return MODE_H_ILLEGAL;
+
if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
@@ -773,6 +815,7 @@
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+ .atomic_check = vc4_hdmi_encoder_atomic_check,
.mode_valid = vc4_hdmi_encoder_mode_valid,
.disable = vc4_hdmi_encoder_disable,
.enable = vc4_hdmi_encoder_enable,
@@ -1694,6 +1737,9 @@
vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
}
+ vc4_hdmi->disable_wifi_frequencies =
+ of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1817,6 +1863,7 @@
PHY_LANE_2,
PHY_LANE_CK,
},
+ .unsupported_odd_h_timings = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
@@ -1842,6 +1889,7 @@
PHY_LANE_CK,
PHY_LANE_2,
},
+ .unsupported_odd_h_timings = true,
.init_resources = vc5_hdmi_init_resources,
.csc_setup = vc5_hdmi_csc_setup,
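A quick standalone check of the vc4_hdmi numbers above: with HDMI's 10x TMDS-to-pixel-clock ratio, the common ~241.5 MHz clock for 2560x1440@60 lands inside the 2.400-2.422 GHz window, and the 238560 kHz value the patch substitutes falls just below it. The 241.5 MHz figure is an assumption for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL
#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL

/* Clock is in kHz, as in struct drm_display_mode; the TMDS rate is 10x the
 * pixel rate for 8b/10b HDMI. */
static bool tmds_in_wifi_ch1(uint32_t clock_khz)
{
	unsigned long long tmds = (unsigned long long)clock_khz * 1000 * 10;

	return tmds >= WIFI_2_4GHz_CH1_MIN_FREQ &&
	       tmds <= WIFI_2_4GHz_CH1_MAX_FREQ;
}

int main(void)
{
	/* 2560x1440@60 commonly uses a ~241.5 MHz pixel clock (illustrative). */
	printf("241500 kHz in band: %d\n", tmds_in_wifi_ch1(241500));
	/* The patch nudges the clock to 238560 kHz, below the WiFi band. */
	printf("238560 kHz in band: %d\n", tmds_in_wifi_ch1(238560));
	return 0;
}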
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 63c6f8b..0526a9c 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -62,6 +62,9 @@
*/
enum vc4_hdmi_phy_channel phy_lane_mapping[4];
+ /* The BCM2711 cannot deal with odd horizontal pixel timings */
+ bool unsupported_odd_h_timings;
+
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
@@ -139,6 +142,14 @@
int hpd_gpio;
bool hpd_active_low;
+ /*
+ * On some systems (like the RPi4), some modes are in the same
+ * frequency range as the WiFi channels (1440p@60Hz for
+ * example). Should we take evasive actions because that system
+ * has a wifi adapter?
+ */
+ bool disable_wifi_frequencies;
+
struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 2b951ca..ba310c0 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -24,6 +24,8 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
+#define HVS_NUM_CHANNELS 3
+
struct vc4_ctm_state {
struct drm_private_state base;
struct drm_color_ctm *ctm;
@@ -35,6 +37,17 @@
return container_of(priv, struct vc4_ctm_state, base);
}
+struct vc4_hvs_state {
+ struct drm_private_state base;
+ unsigned int unassigned_channels;
+};
+
+static struct vc4_hvs_state *
+to_vc4_hvs_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_hvs_state, base);
+}
+
struct vc4_load_tracker_state {
struct drm_private_state base;
u64 hvs_load;
@@ -113,7 +126,7 @@
drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
- return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+ return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}
/* Converts a DRM S31.32 value to the HW S0.9 format. */
@@ -169,6 +182,19 @@
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
+static struct vc4_hvs_state *
+vc4_hvs_get_global_state(struct drm_atomic_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct drm_private_state *priv_state;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_vc4_hvs_state(priv_state);
+}
+
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_atomic_state *state)
{
@@ -213,10 +239,7 @@
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- unsigned char dsp2_mux = 0;
- unsigned char dsp3_mux = 3;
- unsigned char dsp4_mux = 3;
- unsigned char dsp5_mux = 3;
+ unsigned char mux;
unsigned int i;
u32 reg;
@@ -224,50 +247,59 @@
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- if (!crtc_state->active)
+ if (!vc4_state->update_muxing)
continue;
switch (vc4_crtc->data->hvs_output) {
case 2:
- dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ HVS_WRITE(SCALER_DISPECTRL,
+ (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
break;
case 3:
- dsp3_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ HVS_WRITE(SCALER_DISPCTRL,
+ (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
break;
case 4:
- dsp4_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ HVS_WRITE(SCALER_DISPEOLN,
+ (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
+
break;
case 5:
- dsp5_mux = vc4_state->assigned_channel;
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ mux = 3;
+ else
+ mux = vc4_state->assigned_channel;
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ HVS_WRITE(SCALER_DISPDITHER,
+ (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+ VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
break;
default:
break;
}
}
-
- reg = HVS_READ(SCALER_DISPECTRL);
- HVS_WRITE(SCALER_DISPECTRL,
- (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
- VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
-
- reg = HVS_READ(SCALER_DISPCTRL);
- HVS_WRITE(SCALER_DISPCTRL,
- (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
- VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
-
- reg = HVS_READ(SCALER_DISPEOLN);
- HVS_WRITE(SCALER_DISPEOLN,
- (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
- VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
-
- reg = HVS_READ(SCALER_DISPDITHER);
- HVS_WRITE(SCALER_DISPDITHER,
- (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
- VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
}
static void
@@ -657,53 +689,123 @@
&load_state->base,
&vc4_load_tracker_state_funcs);
- return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+ return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}
-#define NUM_OUTPUTS 6
-#define NUM_CHANNELS 3
-
-static int
-vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+static struct drm_private_state *
+vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
- unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
+ struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
+ struct vc4_hvs_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ state->unassigned_channels = old_state->unassigned_channels;
+
+ return &state->base;
+}
+
+static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+
+ kfree(hvs_state);
+}
+
+static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
+ .atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
+ .atomic_destroy_state = vc4_hvs_channels_destroy_state,
+};
+
+static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_private_obj_fini(&vc4->hvs_channels);
+}
+
+static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_hvs_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
+ drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
+ &state->base,
+ &vc4_hvs_state_funcs);
+
+ return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
+}
+
+/*
+ * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
+ * the TXP (and therefore all the CRTCs found on that platform).
+ *
+ * The naive (and our initial) implementation would just iterate over
+ * all the active CRTCs, try to find a suitable FIFO, and then remove it
+ * from the pool of available FIFOs. However, there are a few corner
+ * cases that need to be considered:
+ *
+ * - When running in a dual-display setup (so with two CRTCs involved),
+ * we can update the state of a single CRTC (for example by changing
+ * its mode using xrandr under X11) without affecting the other. In
+ * this case, the other CRTC wouldn't be in the state at all, so we
+ * need to consider all the running CRTCs in the DRM device to assign
+ * a FIFO, not just the one in the state.
+ *
+ * - To fix the above, we can't use drm_atomic_get_crtc_state on all
+ * enabled CRTCs to pull their CRTC state into the global state, since
+ * a page flip would then also wait for their vblank to complete. Since
+ * we don't have a guarantee that they are actually active, that
+ * vblank might never happen, and shouldn't even be considered if we
+ * want to do a page flip on a single CRTC. That can be tested by
+ * doing a modetest -v first on HDMI1 and then on HDMI0.
+ *
+ * - Since we need the pixelvalve to be disabled and enabled back when
+ * the FIFO is changed, we should keep the FIFO assigned for as long
+ * as the CRTC is enabled, only considering it free again once that
+ * CRTC has been disabled. This can be tested by booting X11 on a
+ * single display, and changing the resolution down and then back up.
+ */
+static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct vc4_hvs_state *hvs_new_state;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
- int i, ret;
+ unsigned int i;
- /*
- * Since the HVS FIFOs are shared across all the pixelvalves and
- * the TXP (and thus all the CRTCs), we need to pull the current
- * state of all the enabled CRTCs so that an update to a single
- * CRTC still keeps the previous FIFOs enabled and assigned to
- * the same CRTCs, instead of evaluating only the CRTC being
- * modified.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_state *crtc_state;
-
- if (!crtc->state->enable)
- continue;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
+ hvs_new_state = vc4_hvs_get_global_state(state);
+ if (!hvs_new_state)
+ return -EINVAL;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct vc4_crtc_state *old_vc4_crtc_state =
+ to_vc4_crtc_state(old_crtc_state);
struct vc4_crtc_state *new_vc4_crtc_state =
to_vc4_crtc_state(new_crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
unsigned int matching_channels;
- if (old_crtc_state->enable && !new_crtc_state->enable)
- new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
-
- if (!new_crtc_state->enable)
+ /* Nothing to do here, let's skip it */
+ if (old_crtc_state->enable == new_crtc_state->enable)
continue;
- if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
- unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+ /* Muxing will need to be modified, mark it as such */
+ new_vc4_crtc_state->update_muxing = true;
+
+ /* If we're disabling our CRTC, we put back our channel */
+ if (!new_crtc_state->enable) {
+ hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+ new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
}
@@ -731,17 +833,29 @@
* the future, we will need to have something smarter,
* but it works so far.
*/
- matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+ matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
if (matching_channels) {
unsigned int channel = ffs(matching_channels) - 1;
new_vc4_crtc_state->assigned_channel = channel;
- unassigned_channels &= ~BIT(channel);
+ hvs_new_state->unassigned_channels &= ~BIT(channel);
} else {
return -EINVAL;
}
}
+ return 0;
+}
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = vc4_pv_muxing_atomic_check(dev, state);
+ if (ret)
+ return ret;
+
ret = vc4_ctm_atomic_check(dev, state);
if (ret < 0)
return ret;
@@ -808,6 +922,10 @@
if (ret)
return ret;
+ ret = vc4_hvs_channels_obj_init(vc4);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
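
For readers following the FIFO-assignment comment in vc4_pv_muxing_atomic_check() above, here is a minimal userspace sketch (not part of the patch) of the same bookkeeping: a bitmask of unassigned channels kept in shared state, a channel released when a CRTC is disabled, and the lowest matching free channel claimed with ffs() when one is enabled. The three-channel count and the helper names are illustrative assumptions, not the driver's API.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define HVS_NUM_CHANNELS 3

static unsigned int unassigned_channels = (1u << HVS_NUM_CHANNELS) - 1;

/* Claim the lowest free channel among those the CRTC can actually use. */
static int claim_channel(unsigned int hvs_available_channels)
{
	unsigned int matching = unassigned_channels & hvs_available_channels;
	int channel;

	if (!matching)
		return -1;	/* no FIFO left: the atomic check would fail */

	channel = ffs(matching) - 1;
	unassigned_channels &= ~(1u << channel);
	return channel;
}

/* Put a channel back once its CRTC has been disabled. */
static void release_channel(int channel)
{
	unassigned_channels |= 1u << channel;
}

int main(void)
{
	int a = claim_channel(0x7);	/* e.g. HDMI0, any channel acceptable */
	int b = claim_channel(0x7);	/* e.g. HDMI1 */

	printf("assigned channels %d and %d\n", a, b);
	release_channel(a);		/* CRTC disabled: its channel is free again */
	printf("free mask is now 0x%x\n", unassigned_channels);
	return 0;
}
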
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a97a9d0..a49e0ed 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -734,6 +734,7 @@
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
depends on MELLANOX_PLATFORM && ARM64
+ select I2C_SLAVE
help
Enabling this option will add I2C SMBus support for Mellanox BlueField
system.
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index c98529c..e6f8d6e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -412,6 +412,19 @@
dma->chan_using = NULL;
}
+static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
+{
+ unsigned int temp;
+
+ /*
+ * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
+ * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
+ * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
+ */
+ temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+}
+
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
{
unsigned long orig_jiffies = jiffies;
@@ -424,8 +437,7 @@
/* check for arbitration lost */
if (temp & I2SR_IAL) {
- temp &= ~I2SR_IAL;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
return -EAGAIN;
}
@@ -469,7 +481,7 @@
*/
readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
i2c_imx->i2csr = regval;
- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL);
} else {
wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
}
@@ -478,6 +490,16 @@
dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
return -ETIMEDOUT;
}
+
+ /* check for arbitration lost */
+ if (i2c_imx->i2csr & I2SR_IAL) {
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+
+ i2c_imx->i2csr = 0;
+ return -EAGAIN;
+ }
+
dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
i2c_imx->i2csr = 0;
return 0;
@@ -593,6 +615,8 @@
/* Stop I2C transaction */
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
if (i2c_imx->dma)
temp &= ~I2CR_DMAEN;
@@ -623,9 +647,7 @@
if (temp & I2SR_IIF) {
/* save status register */
i2c_imx->i2csr = temp;
- temp &= ~I2SR_IIF;
- temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
wake_up(&i2c_imx->queue);
return IRQ_HANDLED;
}
@@ -758,9 +780,12 @@
*/
dev_dbg(dev, "<%s> clear MSTA\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0, false);
+ if (!i2c_imx->stopped)
+ i2c_imx_bus_busy(i2c_imx, 0, false);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -885,9 +910,12 @@
dev_dbg(&i2c_imx->adapter.dev,
"<%s> clear MSTA\n", __func__);
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- i2c_imx_bus_busy(i2c_imx, 0, atomic);
+ if (!i2c_imx->stopped)
+ i2c_imx_bus_busy(i2c_imx, 0, atomic);
} else {
/*
* For i2c master receiver repeat restart operation like:
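
The i2c_imx_clear_irq() helper added above relies on one line of bit arithmetic: i2sr_clr_opcode is the value that clears every status bit, so writing ~i2sr_clr_opcode ^ bits clears only the requested bits on both write-0-to-clear (i.MX) and write-1-to-clear (Vybrid) parts. A tiny standalone check of that expression, with the opcode and bit values chosen purely for illustration:

#include <stdio.h>

/* Same expression as i2c_imx_clear_irq(), evaluated outside the kernel. */
static unsigned char clear_only(unsigned char i2sr_clr_opcode, unsigned char bits)
{
	return (unsigned char)(~i2sr_clr_opcode ^ bits);
}

int main(void)
{
	unsigned char bits = 0x10;	/* pretend this is the IAL flag */

	/* W0C (i.MX), opcode 0x00: write 0xef, only bit 4 is written as 0 */
	printf("W0C write: 0x%02x\n", clear_only(0x00, bits));
	/* W1C (Vybrid), opcode 0xff: write 0x10, only bit 4 is written as 1 */
	printf("W1C write: 0x%02x\n", clear_only(0xff, bits));
	return 0;
}
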
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 33574d4..2fb0532 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -1258,9 +1258,9 @@
return -EFAULT;
gpio_res->io = devm_ioremap(dev, params->start, size);
- if (IS_ERR(gpio_res->io)) {
+ if (!gpio_res->io) {
devm_release_mem_region(dev, params->start, size);
- return PTR_ERR(gpio_res->io);
+ return -ENOMEM;
}
return 0;
@@ -1323,9 +1323,9 @@
return -EFAULT;
corepll_res->io = devm_ioremap(dev, params->start, size);
- if (IS_ERR(corepll_res->io)) {
+ if (!corepll_res->io) {
devm_release_mem_region(dev, params->start, size);
- return PTR_ERR(corepll_res->io);
+ return -ENOMEM;
}
return 0;
@@ -1717,9 +1717,9 @@
return -EFAULT;
coalesce_res->io = ioremap(params->start, size);
- if (IS_ERR(coalesce_res->io)) {
+ if (!coalesce_res->io) {
release_mem_region(params->start, size);
- return PTR_ERR(coalesce_res->io);
+ return -ENOMEM;
}
priv->coalesce = coalesce_res;
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index f13735b..1c259b51 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -194,9 +194,9 @@
if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
- cci->master[0].status = -ENXIO;
+ cci->master[1].status = -ENXIO;
else
- cci->master[0].status = -EIO;
+ cci->master[1].status = -EIO;
writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
ret = IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fbc04b6..5a47915 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -801,7 +801,8 @@
if (ret || qup->bus_err || qup->qup_err) {
reinit_completion(&qup->xfer);
- if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret) {
dev_err(qup->dev, "change to run state timed out");
goto desc_err;
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 01bace4..d793355 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -126,26 +126,9 @@
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
- bool tick;
-
- if (!static_cpu_has(X86_FEATURE_ARAT)) {
- /*
- * Switch over to one-shot tick broadcast if the target C-state
- * is deeper than C1.
- */
- if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
- tick = true;
- tick_broadcast_enter();
- } else {
- tick = false;
- }
- }
mwait_idle_with_hints(eax, ecx);
- if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
- tick_broadcast_exit();
-
return index;
}
@@ -1157,6 +1140,20 @@
return false;
}
+static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
+{
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ if (boot_cpu_has(X86_FEATURE_ARAT))
+ return false;
+
+ /*
+ * Switch over to one-shot tick broadcast if the target C-state
+ * is deeper than C1.
+ */
+ return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
+}
+
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>
@@ -1269,6 +1266,9 @@
if (disabled_states_mask & BIT(cstate))
state->flags |= CPUIDLE_FLAG_OFF;
+ if (intel_idle_state_needs_timer_stop(state))
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}
@@ -1507,6 +1507,9 @@
!(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
+ if (intel_idle_state_needs_timer_stop(&drv->states[drv->state_count]))
+ drv->states[drv->state_count].flags |= CPUIDLE_FLAG_TIMER_STOP;
+
drv->state_count++;
}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 8017c40..7989b7e 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1269,9 +1269,6 @@
unsigned long flags;
rdma_for_each_port(device, port_num) {
- if (!rdma_ib_or_roce(device, port_num))
- continue;
-
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0121566..5afd142 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1522,6 +1522,7 @@
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto out;
}
@@ -2114,6 +2115,7 @@
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
+ cm_id_priv->timewait_info = NULL;
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 191e084..4e940fc 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -940,8 +940,8 @@
1);
EFA_SET(&params.modify_mask,
EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
- params.cur_qp_state = qp_attr->cur_qp_state;
- params.qp_state = qp_attr->qp_state;
+ params.cur_qp_state = cur_state;
+ params.qp_state = new_state;
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 8ca51e4..329ee4f 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -206,8 +207,6 @@
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->rec_cpu_num = -1; /* no cpu affinity by default */
- fd->mm = current->mm;
- mmgrab(fd->mm);
fd->dd = dd;
fp->private_data = fd;
return 0;
@@ -711,7 +710,6 @@
deallocate_ctxt(uctxt);
done:
- mmdrop(fdata->mm);
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b4c6bff..e09e824 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,6 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -1451,7 +1452,6 @@
u32 invalid_tid_idx;
/* protect invalid_tids array and invalid_tid_idx */
spinlock_t invalid_lock;
- struct mm_struct *mm;
};
extern struct xarray hfi1_dev_table;
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 24ca17b..f3fb28e 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -48,23 +49,11 @@
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
+#include <linux/sched/mm.h>
#include "mmu_rb.h"
#include "trace.h"
-struct mmu_rb_handler {
- struct mmu_notifier mn;
- struct rb_root_cached root;
- void *ops_arg;
- spinlock_t lock; /* protect the RB tree */
- struct mmu_rb_ops *ops;
- struct mm_struct *mm;
- struct list_head lru_list;
- struct work_struct del_work;
- struct list_head del_list;
- struct workqueue_struct *wq;
-};
-
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
@@ -92,37 +81,36 @@
return PAGE_ALIGN(node->addr + node->len) - 1;
}
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_ops *ops,
struct workqueue_struct *wq,
struct mmu_rb_handler **handler)
{
- struct mmu_rb_handler *handlr;
+ struct mmu_rb_handler *h;
int ret;
- handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
- if (!handlr)
+ h = kmalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
return -ENOMEM;
- handlr->root = RB_ROOT_CACHED;
- handlr->ops = ops;
- handlr->ops_arg = ops_arg;
- INIT_HLIST_NODE(&handlr->mn.hlist);
- spin_lock_init(&handlr->lock);
- handlr->mn.ops = &mn_opts;
- handlr->mm = mm;
- INIT_WORK(&handlr->del_work, handle_remove);
- INIT_LIST_HEAD(&handlr->del_list);
- INIT_LIST_HEAD(&handlr->lru_list);
- handlr->wq = wq;
+ h->root = RB_ROOT_CACHED;
+ h->ops = ops;
+ h->ops_arg = ops_arg;
+ INIT_HLIST_NODE(&h->mn.hlist);
+ spin_lock_init(&h->lock);
+ h->mn.ops = &mn_opts;
+ INIT_WORK(&h->del_work, handle_remove);
+ INIT_LIST_HEAD(&h->del_list);
+ INIT_LIST_HEAD(&h->lru_list);
+ h->wq = wq;
- ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+ ret = mmu_notifier_register(&h->mn, current->mm);
if (ret) {
- kfree(handlr);
+ kfree(h);
return ret;
}
- *handler = handlr;
+ *handler = h;
return 0;
}
@@ -134,7 +122,7 @@
struct list_head del_list;
/* Unregister first so we don't get any more notifications. */
- mmu_notifier_unregister(&handler->mn, handler->mm);
+ mmu_notifier_unregister(&handler->mn, handler->mn.mm);
/*
* Make sure the wq delete handler is finished running. It will not
@@ -166,6 +154,10 @@
int ret = 0;
trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
+
+ if (current->mm != handler->mn.mm)
+ return -EPERM;
+
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
if (node) {
@@ -180,6 +172,7 @@
__mmu_int_rb_remove(mnode, &handler->root);
list_del(&mnode->list); /* remove from LRU list */
}
+ mnode->handler = handler;
unlock:
spin_unlock_irqrestore(&handler->lock, flags);
return ret;
@@ -217,6 +210,9 @@
unsigned long flags;
bool ret = false;
+ if (current->mm != handler->mn.mm)
+ return ret;
+
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
if (node) {
@@ -239,6 +235,9 @@
unsigned long flags;
bool stop = false;
+ if (current->mm != handler->mn.mm)
+ return;
+
INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
@@ -272,6 +271,9 @@
{
unsigned long flags;
+ if (current->mm != handler->mn.mm)
+ return;
+
/* Validity of handler and node pointers has been checked by caller. */
trace_hfi1_mmu_rb_remove(node->addr, node->len);
spin_lock_irqsave(&handler->lock, flags);
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index f04cec1..423aacc 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -54,6 +55,7 @@
unsigned long len;
unsigned long __last;
struct rb_node node;
+ struct mmu_rb_handler *handler;
struct list_head list;
};
@@ -71,7 +73,19 @@
void *evict_arg, bool *stop);
};
-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+struct mmu_rb_handler {
+ struct mmu_notifier mn;
+ struct rb_root_cached root;
+ void *ops_arg;
+ spinlock_t lock; /* protect the RB tree */
+ struct mmu_rb_ops *ops;
+ struct list_head lru_list;
+ struct work_struct del_work;
+ struct list_head del_list;
+ struct workqueue_struct *wq;
+};
+
+int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_ops *ops,
struct workqueue_struct *wq,
struct mmu_rb_handler **handler);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index f81ca20..b94fc7f 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -173,15 +174,18 @@
{
struct page **pages;
struct hfi1_devdata *dd = fd->uctxt->dd;
+ struct mm_struct *mm;
if (mapped) {
pci_unmap_single(dd->pcidev, node->dma_addr,
node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
pages = &node->pages[idx];
+ mm = mm_from_tid_node(node);
} else {
pages = &tidbuf->pages[idx];
+ mm = current->mm;
}
- hfi1_release_user_pages(fd->mm, pages, npages, mapped);
+ hfi1_release_user_pages(mm, pages, npages, mapped);
fd->tid_n_pinned -= npages;
}
@@ -216,12 +220,12 @@
* pages, accept the amount pinned so far and program only that.
* User space knows how to deal with partially programmed buffers.
*/
- if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
+ if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
kfree(pages);
return -ENOMEM;
}
- pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
+ pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
if (pinned <= 0) {
kfree(pages);
return pinned;
@@ -756,7 +760,7 @@
if (fd->use_mn) {
ret = mmu_interval_notifier_insert(
- &node->notifier, fd->mm,
+ &node->notifier, current->mm,
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
&tid_mn_ops);
if (ret)
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 332abb4..d45c7b6 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -1,6 +1,7 @@
#ifndef _HFI1_USER_EXP_RCV_H
#define _HFI1_USER_EXP_RCV_H
/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -95,4 +96,9 @@
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
struct hfi1_tid_info *tinfo);
+static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
+{
+ return node->notifier.mm;
+}
+
#endif /* _HFI1_USER_EXP_RCV_H */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a92346e..4a4956f9 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,4 +1,5 @@
/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -188,7 +189,6 @@
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
atomic_set(&pq->n_locked, 0);
- pq->mm = fd->mm;
iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
activate_packet_queue, NULL, NULL);
@@ -230,7 +230,7 @@
cq->nentries = hfi1_sdma_comp_ring_size;
- ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+ ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
&pq->handler);
if (ret) {
dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -980,13 +980,13 @@
npages -= node->npages;
retry:
- if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+ if (!hfi1_can_pin_pages(pq->dd, current->mm,
atomic_read(&pq->n_locked), npages)) {
cleared = sdma_cache_evict(pq, npages);
if (cleared >= npages)
goto retry;
}
- pinned = hfi1_acquire_user_pages(pq->mm,
+ pinned = hfi1_acquire_user_pages(current->mm,
((unsigned long)iovec->iov.iov_base +
(node->npages * PAGE_SIZE)), npages, 0,
pages + node->npages);
@@ -995,7 +995,7 @@
return pinned;
}
if (pinned != npages) {
- unpin_vector_pages(pq->mm, pages, node->npages, pinned);
+ unpin_vector_pages(current->mm, pages, node->npages, pinned);
return -EFAULT;
}
kfree(node->pages);
@@ -1008,7 +1008,8 @@
static void unpin_sdma_pages(struct sdma_mmu_node *node)
{
if (node->npages) {
- unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
+ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+ node->npages);
atomic_sub(node->npages, &node->pq->n_locked);
}
}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 9972e0e..1e8c02f 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,6 +1,7 @@
#ifndef _HFI1_USER_SDMA_H
#define _HFI1_USER_SDMA_H
/*
+ * Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -133,7 +134,6 @@
unsigned long unpinned;
struct mmu_rb_handler *handler;
atomic_t n_locked;
- struct mm_struct *mm;
};
struct hfi1_user_sdma_comp_q {
@@ -250,4 +250,9 @@
struct iovec *iovec, unsigned long dim,
unsigned long *count);
+static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
+{
+ return node->rb.handler->mn.mm;
+}
+
#endif /* _HFI1_USER_SDMA_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 6d30850..0468028 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2936,6 +2936,7 @@
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
@@ -4989,11 +4990,11 @@
V2_QPC_BYTE_28_AT_M,
V2_QPC_BYTE_28_AT_S);
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
- V2_QPC_BYTE_212_RETRY_CNT_M,
- V2_QPC_BYTE_212_RETRY_CNT_S);
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
- V2_QPC_BYTE_244_RNR_CNT_M,
- V2_QPC_BYTE_244_RNR_CNT_S);
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 29c9dd4..be7f2fe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1661,7 +1661,7 @@
__le32 rsv_uars_rsv_qps;
};
#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
-#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
+#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0)
#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 2408b27..584932d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -54,10 +54,6 @@
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1580,7 +1576,6 @@
if (status)
goto exit;
iwdev->obj_next = iwdev->obj_mem;
- iwdev->push_mode = push_mode;
init_waitqueue_head(&iwdev->vchnl_waitq);
init_waitqueue_head(&dev->vf_reqs);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 581ecba..533f3ca 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -167,39 +167,16 @@
*/
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
- struct i40iw_ucontext *ucontext;
- u64 db_addr_offset, push_offset, pfn;
+ struct i40iw_ucontext *ucontext = to_ucontext(context);
+ u64 dbaddr;
- ucontext = to_ucontext(context);
- if (ucontext->iwdev->sc_dev.is_pf) {
- db_addr_offset = I40IW_DB_ADDR_OFFSET;
- push_offset = I40IW_PUSH_OFFSET;
- if (vma->vm_pgoff)
- vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
- } else {
- db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
- push_offset = I40IW_VF_PUSH_OFFSET;
- if (vma->vm_pgoff)
- vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
- }
+ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
- vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+ dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
- if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- } else {
- if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- }
-
- pfn = vma->vm_pgoff +
- (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
- PAGE_SHIFT);
-
- return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
- vma->vm_page_prot, NULL);
+ return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot), NULL);
}
/**
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index c3cfea2..119b257 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -803,8 +803,10 @@
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_arm;
+ }
cq_context = mailbox->buf;
@@ -846,9 +848,9 @@
}
spin_lock_irq(&dev->cq_table.lock);
- if (mthca_array_set(&dev->cq_table.cq,
- cq->cqn & (dev->limits.num_cqs - 1),
- cq)) {
+ err = mthca_array_set(&dev->cq_table.cq,
+ cq->cqn & (dev->limits.num_cqs - 1), cq);
+ if (err) {
spin_unlock_irq(&dev->cq_table.lock);
goto err_out_free_mr;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 019642f..511c95b 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1936,6 +1936,15 @@
}
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* calculate the db_rec_db2 data since it is constant so no
+ * need to reflect from user
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+
rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
&qp->urq.db_rec_db2_data,
DB_REC_WIDTH_32B,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c77cdb3..8c73377 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -241,6 +241,7 @@
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+ { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -418,6 +419,7 @@
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
+ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 15d17c7..1f0d61b 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -183,6 +183,7 @@
"changed: [r%d c%d]: byte %02x\n",
row, col, new_state);
+ input_event(idev, EV_MSC, MSC_SCAN, pos);
input_report_key(idev, keycodes[pos],
new_state);
}
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index e413801..f515fae 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -568,12 +568,15 @@
dev->ctl_data->byte[HID_OR2] = dev->keybit;
dev->ctl_data->byte[HID_OR3] = 0x00;
+ dev->ctl_urb_pending = 1;
error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
- if (error)
+ if (error) {
+ dev->ctl_urb_pending = 0;
dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
__func__, error);
- else
+ } else {
dev->open = 1;
+ }
mutex_unlock(&dev->pm_mutex);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index cae1a3f..cb6ec59 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -82,6 +83,17 @@
DMI_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
},
+ {
+ /*
+ * Lenovo Yoga Tab2 1051L: something messes with the home-button
+ * IRQ settings, leading to a non-working home button.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a4c9b96..3a2dcf0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,12 @@
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+ },
},
{ }
};
@@ -608,6 +612,48 @@
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
+ },
+ },
+ {
/* Advent 4211 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 944cbb5..abae23a 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1471,7 +1471,8 @@
if (error)
goto err_free_ports;
- if (aux_enable())
+ error = aux_enable();
+ if (error)
goto err_free_irq;
i8042_aux_irq_registered = true;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 98f17fa..b6f7536 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2183,11 +2183,11 @@
msleep(MXT_FW_RESET_TIME);
}
- error = mxt_acquire_irq(data);
+ error = mxt_check_retrigen(data);
if (error)
return error;
- error = mxt_check_retrigen(data);
+ error = mxt_acquire_irq(data);
if (error)
return error;
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 02c75ea..6612f9e 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -193,6 +193,18 @@
},
},
{
+ .ident = "Teclast X98 Pro",
+ .matches = {
+ /*
+ * Only match BIOS date, because the manufacturer's
+ * BIOS does not report the board name at all
+ * (sometimes)...
+ */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
+ },
+ },
+ {
.ident = "WinBook TW100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index e694a9b..603a948 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -137,45 +137,25 @@
bool wake_irq_enabled;
};
-static int raydium_i2c_xfer(struct i2c_client *client,
- u32 addr, void *data, size_t len, bool is_read)
+/*
+ * Header to be sent for RM_CMD_BANK_SWITCH command. This is used by
+ * raydium_i2c_{read|send} below.
+ */
+struct __packed raydium_bank_switch_header {
+ u8 cmd;
+ __be32 be_addr;
+};
+
+static int raydium_i2c_xfer(struct i2c_client *client, u32 addr,
+ struct i2c_msg *xfer, size_t xfer_count)
{
- struct raydium_bank_switch_header {
- u8 cmd;
- __be32 be_addr;
- } __packed header = {
- .cmd = RM_CMD_BANK_SWITCH,
- .be_addr = cpu_to_be32(addr),
- };
-
- u8 reg_addr = addr & 0xff;
-
- struct i2c_msg xfer[] = {
- {
- .addr = client->addr,
- .len = sizeof(header),
- .buf = (u8 *)&header,
- },
- {
- .addr = client->addr,
- .len = 1,
- .buf = &reg_addr,
- },
- {
- .addr = client->addr,
- .len = len,
- .buf = data,
- .flags = is_read ? I2C_M_RD : 0,
- }
- };
-
+ int ret;
/*
* If address is greater than 255, then RM_CMD_BANK_SWITCH needs to be
* sent first. Else, skip the header i.e. xfer[0].
*/
int xfer_start_idx = (addr > 0xff) ? 0 : 1;
- size_t xfer_count = ARRAY_SIZE(xfer) - xfer_start_idx;
- int ret;
+ xfer_count -= xfer_start_idx;
ret = i2c_transfer(client->adapter, &xfer[xfer_start_idx], xfer_count);
if (likely(ret == xfer_count))
@@ -189,10 +169,46 @@
{
int tries = 0;
int error;
+ u8 *tx_buf;
+ u8 reg_addr = addr & 0xff;
+
+ tx_buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+
+ tx_buf[0] = reg_addr;
+ memcpy(tx_buf + 1, data, len);
do {
- error = raydium_i2c_xfer(client, addr, (void *)data, len,
- false);
+ struct raydium_bank_switch_header header = {
+ .cmd = RM_CMD_BANK_SWITCH,
+ .be_addr = cpu_to_be32(addr),
+ };
+
+ /*
+ * Perform as a single i2c_transfer transaction to ensure that
+ * no other I2C transactions are initiated on the bus to any
+ * other device in between. Initiating transactions to other
+ * devices after RM_CMD_BANK_SWITCH is sent is known to cause
+ * issues. This is also why regmap infrastructure cannot be used
+ * for this driver. Regmap handles page(bank) switch and writes
+ * as separate i2c_transfer() operations. This can result in
+ * problems if the Raydium device is on a shared I2C bus.
+ */
+ struct i2c_msg xfer[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(header),
+ .buf = (u8 *)&header,
+ },
+ {
+ .addr = client->addr,
+ .len = len + 1,
+ .buf = tx_buf,
+ },
+ };
+
+ error = raydium_i2c_xfer(client, addr, xfer, ARRAY_SIZE(xfer));
if (likely(!error))
return 0;
@@ -206,12 +222,46 @@
static int raydium_i2c_read(struct i2c_client *client,
u32 addr, void *data, size_t len)
{
- size_t xfer_len;
int error;
while (len) {
- xfer_len = min_t(size_t, len, RM_MAX_READ_SIZE);
- error = raydium_i2c_xfer(client, addr, data, xfer_len, true);
+ u8 reg_addr = addr & 0xff;
+ struct raydium_bank_switch_header header = {
+ .cmd = RM_CMD_BANK_SWITCH,
+ .be_addr = cpu_to_be32(addr),
+ };
+ size_t xfer_len = min_t(size_t, len, RM_MAX_READ_SIZE);
+
+ /*
+ * Perform as a single i2c_transfer transaction to ensure that
+ * no other I2C transactions are initiated on the bus to any
+ * other device in between. Initiating transactions to other
+ * devices after RM_CMD_BANK_SWITCH is sent is known to cause
+ * issues. This is also why regmap infrastructure cannot be used
+ * for this driver. Regmap handles page(bank) switch and reads
+ * as separate i2c_transfer() operations. This can result in
+ * problems if the Raydium device is on a shared I2C bus.
+ */
+ struct i2c_msg xfer[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(header),
+ .buf = (u8 *)&header,
+ },
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &reg_addr,
+ },
+ {
+ .addr = client->addr,
+ .len = xfer_len,
+ .buf = data,
+ .flags = I2C_M_RD,
+ }
+ };
+
+ error = raydium_i2c_xfer(client, addr, xfer, ARRAY_SIZE(xfer));
if (unlikely(error))
return error;
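
The comments above stress that the RM_CMD_BANK_SWITCH header and the following register access must travel in a single i2c_transfer() so no other traffic can slip onto the bus in between. For reference, a hedged userspace equivalent using the I2C_RDWR ioctl, which likewise issues all messages as one combined transaction; the 0x39 slave address, the bank-switch opcode byte and the register/length values are made-up placeholders, not taken from the driver.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-1", O_RDWR);
	/* 1-byte command followed by a 32-bit big-endian bank address */
	uint8_t header[5] = { 0xaa, 0x00, 0x00, 0x01, 0x00 };
	uint8_t reg = 0x00;
	uint8_t data[4];
	struct i2c_msg msgs[] = {
		{ .addr = 0x39, .flags = 0, .len = sizeof(header), .buf = header },
		{ .addr = 0x39, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = 0x39, .flags = I2C_M_RD, .len = sizeof(data), .buf = data },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 3 };

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("combined i2c transfer");
		return 1;
	}
	printf("read %02x %02x %02x %02x\n", data[0], data[1], data[2], data[3]);
	close(fd);
	return 0;
}
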
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 974a667..5ad519c 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1083,7 +1083,6 @@
count++;
count += of_count_icc_providers(child);
}
- of_node_put(np);
return count;
}
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index 42c6c55..e8371d4 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -182,7 +182,7 @@
DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, -1, -1, MSM8916_SNOC_QDSS_INT);
DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
@@ -208,14 +208,14 @@
DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, -1, 0);
DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
-DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, -1, 0);
DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
@@ -239,7 +239,7 @@
DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
@@ -249,7 +249,7 @@
DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, -1, -1, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 3a313e1..da68ce3 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -618,6 +618,8 @@
do_div(rate, src_qn->buswidth);
+ rate = min_t(u32, rate, INT_MAX);
+
if (src_qn->rate == rate)
return 0;
@@ -635,6 +637,14 @@
return 0;
}
+static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+ *avg = 0;
+ *peak = 0;
+
+ return 0;
+}
+
static int msm8974_icc_probe(struct platform_device *pdev)
{
const struct msm8974_icc_desc *desc;
@@ -688,6 +698,7 @@
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
+ provider->get_bw = msm8974_get_bw;
ret = icc_provider_add(provider);
if (ret) {
@@ -758,6 +769,7 @@
.driver = {
.name = "qnoc-msm8974",
.of_match_table = msm8974_noc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(msm8974_noc_driver);
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index d4769a5..9820709 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -157,8 +157,8 @@
}
DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 8964770..494b42a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -257,7 +257,7 @@
#define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60)
#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN (9ULL << 1)
#define DTE_IRQ_REMAP_ENABLE 1ULL
#define PAGE_MODE_NONE 0x00
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 82e4af8..23a790f 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -29,6 +29,7 @@
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
+#include <asm/set_memory.h>
#include <linux/crash_dump.h>
@@ -672,11 +673,27 @@
free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
+static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+ gfp_t gfp, size_t size)
+{
+ int order = get_order(size);
+ void *buf = (void *)__get_free_pages(gfp, order);
+
+ if (buf &&
+ iommu_feature(iommu, FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, (1 << order))) {
+ free_pages((unsigned long)buf, order);
+ buf = NULL;
+ }
+
+ return buf;
+}
+
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
- iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(EVT_BUFFER_SIZE));
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ EVT_BUFFER_SIZE);
return iommu->evt_buf ? 0 : -ENOMEM;
}
@@ -715,8 +732,8 @@
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
- iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(PPR_LOG_SIZE));
+ iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+ PPR_LOG_SIZE);
return iommu->ppr_log ? 0 : -ENOMEM;
}
@@ -838,7 +855,7 @@
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
- iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+ iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
return iommu->cmd_sem ? 0 : -ENOMEM;
}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be43180..702fbaa 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -69,6 +69,10 @@
{
struct qcom_smmu *qsmmu;
+ /* Check to make sure qcom_scm has finished probing */
+ if (!qcom_scm_is_available())
+ return ERR_PTR(-EPROBE_DEFER);
+
qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
if (!qsmmu)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 11319e4..b46dbfa6 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -986,7 +986,8 @@
warn_invalid_dmar(phys_addr, " returns all ones");
goto unmap;
}
- iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
+ if (ecap_vcs(iommu->ecap))
+ iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index c71a9c2..a49afa1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1833,7 +1833,7 @@
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
}
- if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+ if (vccap_pasid(iommu->vccap))
ioasid_unregister_allocator(&iommu->pasid_allocator);
#endif
@@ -3212,7 +3212,7 @@
* is active. All vIOMMU allocators will eventually be calling the same
* host allocator.
*/
- if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+ if (!vccap_pasid(iommu->vccap))
return;
pr_info("Register custom PASID allocator\n");
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b53446b..0f4dc25 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -264,16 +264,18 @@
*/
iommu_alloc_default_domain(group, dev);
- if (group->default_domain)
+ if (group->default_domain) {
ret = __iommu_attach_device(group->default_domain, dev);
+ if (ret) {
+ iommu_group_put(group);
+ goto err_release;
+ }
+ }
iommu_create_device_direct_mappings(group, dev);
iommu_group_put(group);
- if (ret)
- goto err_release;
-
if (ops->probe_finalize)
ops->probe_finalize(dev);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0fec319..4069c21 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -42,7 +42,6 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
-#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
@@ -4741,9 +4740,6 @@
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
- continue;
-
base = its->base;
its->ctlr_save = readl_relaxed(base + GITS_CTLR);
err = its_force_quiescent(base);
@@ -4762,9 +4758,6 @@
list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
void __iomem *base;
- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
- continue;
-
base = its->base;
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
@@ -4784,9 +4777,6 @@
void __iomem *base;
int i;
- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
- continue;
-
base = its->base;
/*
@@ -4794,7 +4784,10 @@
* don't restore it since writing to CBASER or BASER<n>
* registers is undefined according to the GIC v3 ITS
* Specification.
+ *
+ * Firmware resuming with the ITS enabled is terminally broken.
*/
+ WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
ret = its_force_quiescent(base);
if (ret) {
pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
@@ -5074,9 +5067,6 @@
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
- if (GITS_TYPER_HCC(typer))
- its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
-
err = its_init_domain(handle, its);
if (err)
goto out_free_tables;
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index 1d02762..abd011f 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -136,7 +136,7 @@
if (fwspec->param_count != 2)
return -EINVAL;
*hwirq = fwspec->param[0];
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
}
return 0;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9644424..4bc453f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -712,10 +712,6 @@
return cache->sectors_per_block_shift >= 0;
}
-/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
-#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
-__always_inline
-#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
do_div(b, n);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3fc3757..5a7a1b9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3462,7 +3462,7 @@
int r;
if (a->alg_string) {
- *hash = crypto_alloc_shash(a->alg_string, 0, 0);
+ *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(*hash)) {
*error = error_alg;
r = PTR_ERR(*hash);
@@ -3519,7 +3519,7 @@
struct journal_completion comp;
comp.ic = ic;
- ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
+ ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(ic->journal_crypt)) {
*error = "Invalid journal cipher";
r = PTR_ERR(ic->journal_crypt);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ce543b7..7eeb7c4 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,7 +18,6 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
-#include <linux/lcm.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
@@ -1247,12 +1246,6 @@
void dm_table_event(struct dm_table *t)
{
- /*
- * You can no longer call dm_table_event() from interrupt
- * context, use a bottom half instead.
- */
- BUG_ON(in_interrupt());
-
mutex_lock(&_event_lock);
if (t->event_fn)
t->event_fn(t->event_context);
@@ -1455,10 +1448,6 @@
zone_sectors = ti_limits.chunk_sectors;
}
- /* Stack chunk_sectors if target-specific splitting is required */
- if (ti->max_io_len)
- ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
- ti_limits.chunk_sectors);
/* Set I/O hints portion of queue limits */
if (ti->type->io_hints)
ti->type->io_hints(ti, &ti_limits);
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 9ae4ce7..d5223a0 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -319,7 +319,7 @@
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
- BUG();
+ return -EOPNOTSUPP;
}
#endif
@@ -2041,7 +2041,7 @@
struct wc_memory_superblock s;
static struct dm_arg _args[] = {
- {0, 10, "Invalid number of feature args"},
+ {0, 16, "Invalid number of feature args"},
};
as.argc = argc;
@@ -2479,6 +2479,8 @@
extra_args += 2;
if (wc->autocommit_time_set)
extra_args += 2;
+ if (wc->max_age != MAX_AGE_UNSPECIFIED)
+ extra_args += 2;
if (wc->cleaner)
extra_args++;
if (wc->writeback_fua_set)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c18fc25..4e0cbfe 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -476,8 +476,10 @@
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ if (!map) {
+ ret = -EIO;
+ goto out;
+ }
do {
struct dm_target *tgt;
@@ -507,7 +509,6 @@
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
- __acquires(md->io_barrier)
{
struct dm_target *tgt;
struct dm_table *map;
@@ -541,7 +542,6 @@
}
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
- __releases(md->io_barrier)
{
dm_put_live_table(md, srcu_idx);
}
@@ -1037,15 +1037,18 @@
sector_t max_len;
/*
- * Does the target need to split even further?
- * - q->limits.chunk_sectors reflects ti->max_io_len so
- * blk_max_size_offset() provides required splitting.
- * - blk_max_size_offset() also respects q->limits.max_sectors
+ * Does the target need to split IO even further?
+ * - varied (per target) IO splitting is a tenet of DM; this
+ * explains why stacked chunk_sectors based splitting via
+ * blk_max_size_offset() isn't possible here. So pass in
+ * ti->max_io_len to override stacked chunk_sectors.
*/
- max_len = blk_max_size_offset(ti->table->md->queue,
- target_offset);
- if (len > max_len)
- len = max_len;
+ if (ti->max_io_len) {
+ max_len = blk_max_size_offset(ti->table->md->queue,
+ target_offset, ti->max_io_len);
+ if (len > max_len)
+ len = max_len;
+ }
return len;
}
@@ -1196,11 +1199,9 @@
* ->zero_page_range() is mandatory dax operation. If we are
* here, something is wrong.
*/
- dm_put_live_table(md, srcu_idx);
goto out;
}
ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
-
out:
dm_put_live_table(md, srcu_idx);
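
The dm.c hunk above stops relying on stacked chunk_sectors and instead passes ti->max_io_len straight to blk_max_size_offset(), so the target's own boundary decides the split. The boundary arithmetic this buys is, in rough sketch form (illustrative only; cap_len_at_boundary() is a made-up name, not the block layer's implementation, and it assumes is_power_of_2()/sector_div() from linux/log2.h and linux/kernel.h):

static sector_t cap_len_at_boundary(sector_t offset, sector_t len,
				    unsigned int boundary)
{
	sector_t left;

	/* A zero boundary means the target imposes no split limit. */
	if (!boundary)
		return len;

	/* Sectors left before the next multiple of 'boundary'. */
	if (is_power_of_2(boundary))
		left = boundary - (offset & (boundary - 1));
	else
		left = boundary - sector_div(offset, boundary);

	/* Never let a single I/O cross that boundary. */
	return min_t(sector_t, len, left);
}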
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index e4d8446..04b13cd 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -88,13 +88,15 @@
MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */
MSGCODE_GET_DEVICE_TYPE,
MSGCODE_SET_DEVICE_TYPE,
- MSGCODE_GET_HDMI_VERSION,
+ MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */
MSGCODE_SET_HDMI_VERSION,
MSGCODE_GET_OSD_NAME,
MSGCODE_SET_OSD_NAME,
MSGCODE_WRITE_EEPROM,
MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */
MSGCODE_SET_ACTIVE_SOURCE,
+ MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */
+ MSGCODE_SET_AUTO_POWER_ON,
MSGCODE_FRAME_EOM = 0x80,
MSGCODE_FRAME_ACK = 0x40,
@@ -143,6 +145,8 @@
"WRITE_EEPROM",
"GET_ADAPTER_TYPE",
"SET_ACTIVE_SOURCE",
+ "GET_AUTO_POWER_ON",
+ "SET_AUTO_POWER_ON",
};
static const char *pulse8_msgname(u8 cmd)
@@ -579,12 +583,14 @@
if (err)
goto unlock;
- cmd[0] = MSGCODE_SET_HDMI_VERSION;
- cmd[1] = adap->log_addrs.cec_version;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 0);
- if (err)
- goto unlock;
+ if (pulse8->vers < 10) {
+ cmd[0] = MSGCODE_SET_HDMI_VERSION;
+ cmd[1] = adap->log_addrs.cec_version;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+ }
if (adap->log_addrs.osd_name[0]) {
size_t osd_len = strlen(adap->log_addrs.osd_name);
@@ -650,7 +656,6 @@
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
- pulse8->serio = NULL;
serio_set_drvdata(serio, NULL);
serio_close(serio);
}
@@ -692,6 +697,14 @@
dev_dbg(pulse8->dev, "Autonomous mode: %s",
data[0] ? "on" : "off");
+ if (pulse8->vers >= 10) {
+ cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (!err)
+ dev_dbg(pulse8->dev, "Auto Power On: %s",
+ data[0] ? "on" : "off");
+ }
+
cmd[0] = MSGCODE_GET_DEVICE_TYPE;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
@@ -753,12 +766,15 @@
dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
cec_phys_addr_exp(*pa));
- cmd[0] = MSGCODE_GET_HDMI_VERSION;
- err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
- if (err)
- return err;
- log_addrs->cec_version = data[0];
- dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+ log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
+ if (pulse8->vers < 10) {
+ cmd[0] = MSGCODE_GET_HDMI_VERSION;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (err)
+ return err;
+ log_addrs->cec_version = data[0];
+ dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+ }
cmd[0] = MSGCODE_GET_OSD_NAME;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
@@ -830,8 +846,10 @@
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
dev_name(&serio->dev), caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
- if (err < 0)
- goto free_device;
+ if (err < 0) {
+ kfree(pulse8);
+ return err;
+ }
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
@@ -874,8 +892,6 @@
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
-free_device:
- kfree(pulse8);
return err;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 4eab6d8..89e3839 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -414,6 +414,17 @@
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
+ /*
+ * We need to set these flags here so that the videobuf2 core
+ * will call ->prepare()/->finish() cache sync/flush on vb2
+ * buffers when appropriate. However, we can avoid explicit
+ * ->prepare() and ->finish() cache sync for DMABUF buffers,
+ * because DMA exporter takes care of it.
+ */
+ if (q->memory != VB2_MEMORY_DMABUF) {
+ vb->need_cache_sync_on_prepare = 1;
+ vb->need_cache_sync_on_finish = 1;
+ }
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
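
The two flags set above are what later lets the core skip explicit cache maintenance for DMABUF buffers, since the exporter is responsible for coherency there. In rough outline (an illustrative sketch, not the exact videobuf2 call chain; example_buf_mem_prepare() is a made-up name, call_void_memop() is vb2-core's internal helper macro), the prepare side ends up doing something like:

static void example_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	/*
	 * DMABUF-backed buffers never get the flag set above, so the
	 * per-plane ->prepare() cache sync is skipped and the DMA-BUF
	 * exporter remains responsible for coherency.
	 */
	if (!vb->need_cache_sync_on_prepare)
		return;

	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}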
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index a3cb104..7e152bb 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -253,17 +253,31 @@
depends on MTK_IOMMU || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on VIDEO_MEDIATEK_VPU || MTK_SCP
+ # The two following lines ensure we have the same state ("m" or "y") as
+ # our dependencies, to avoid missing symbols during link.
+ depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
+ depends on MTK_SCP || !MTK_SCP
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
- select VIDEO_MEDIATEK_VPU
- select MTK_SCP
+ select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
+ select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
help
- Mediatek video codec driver provides HW capability to
- encode and decode in a range of video formats
- This driver rely on VPU driver to communicate with VPU.
+ Mediatek video codec driver provides HW capability to
+ encode and decode in a range of video formats on MT8173
+ and MT8183.
- To compile this driver as modules, choose M here: the
- modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
+ Note that support for MT8173 requires VIDEO_MEDIATEK_VPU to
+ also be selected. Support for MT8183 depends on MTK_SCP.
+
+ To compile this driver as modules, choose M here: the
+ modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
+
+config VIDEO_MEDIATEK_VCODEC_VPU
+ bool
+
+config VIDEO_MEDIATEK_VCODEC_SCP
+ bool
config VIDEO_MEM2MEM_DEINTERLACE
tristate "Deinterlace support"
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index cd902b1..63fce1b 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -307,6 +307,7 @@
* Suspend/resume support.
*/
+#ifdef CONFIG_PM
static int mmpcam_runtime_resume(struct device *dev)
{
struct mmp_camera *cam = dev_get_drvdata(dev);
@@ -352,6 +353,7 @@
return mccic_resume(&cam->mcam);
return 0;
}
+#endif
static const struct dev_pm_ops mmpcam_pm_ops = {
SET_RUNTIME_PM_OPS(mmpcam_runtime_suspend, mmpcam_runtime_resume, NULL)
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index f679c6e..4618d43 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -24,4 +24,12 @@
mtk-vcodec-common-y := mtk_vcodec_intr.o \
mtk_vcodec_util.o \
- mtk_vcodec_fw.o
+ mtk_vcodec_fw.o \
+
+ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU),)
+mtk-vcodec-common-y += mtk_vcodec_fw_vpu.o
+endif
+
+ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP),)
+mtk-vcodec-common-y += mtk_vcodec_fw_scp.o
+endif
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index d14bc20..145686d 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -241,7 +241,7 @@
}
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, VPU_RST_DEC);
+ dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, DECODER);
if (IS_ERR(dev->fw_handler))
return PTR_ERR(dev->fw_handler);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index dcfa2c2..3be8a04 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -293,7 +293,7 @@
}
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, VPU_RST_ENC);
+ dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, ENCODER);
if (IS_ERR(dev->fw_handler))
return PTR_ERR(dev->fw_handler);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
index 6c2a256..94b39ae 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
@@ -1,193 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
#include "mtk_vcodec_fw.h"
+#include "mtk_vcodec_fw_priv.h"
#include "mtk_vcodec_util.h"
#include "mtk_vcodec_drv.h"
-struct mtk_vcodec_fw_ops {
- int (*load_firmware)(struct mtk_vcodec_fw *fw);
- unsigned int (*get_vdec_capa)(struct mtk_vcodec_fw *fw);
- unsigned int (*get_venc_capa)(struct mtk_vcodec_fw *fw);
- void * (*map_dm_addr)(struct mtk_vcodec_fw *fw, u32 dtcm_dmem_addr);
- int (*ipi_register)(struct mtk_vcodec_fw *fw, int id,
- mtk_vcodec_ipi_handler handler, const char *name, void *priv);
- int (*ipi_send)(struct mtk_vcodec_fw *fw, int id, void *buf,
- unsigned int len, unsigned int wait);
-};
-
-struct mtk_vcodec_fw {
- enum mtk_vcodec_fw_type type;
- const struct mtk_vcodec_fw_ops *ops;
- struct platform_device *pdev;
- struct mtk_scp *scp;
-};
-
-static int mtk_vcodec_vpu_load_firmware(struct mtk_vcodec_fw *fw)
-{
- return vpu_load_firmware(fw->pdev);
-}
-
-static unsigned int mtk_vcodec_vpu_get_vdec_capa(struct mtk_vcodec_fw *fw)
-{
- return vpu_get_vdec_hw_capa(fw->pdev);
-}
-
-static unsigned int mtk_vcodec_vpu_get_venc_capa(struct mtk_vcodec_fw *fw)
-{
- return vpu_get_venc_hw_capa(fw->pdev);
-}
-
-static void *mtk_vcodec_vpu_map_dm_addr(struct mtk_vcodec_fw *fw,
- u32 dtcm_dmem_addr)
-{
- return vpu_mapping_dm_addr(fw->pdev, dtcm_dmem_addr);
-}
-
-static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
- mtk_vcodec_ipi_handler handler,
- const char *name, void *priv)
-{
- /*
- * The handler we receive takes a void * as its first argument. We
- * cannot change this because it needs to be passed down to the rproc
- * subsystem when SCP is used. VPU takes a const argument, which is
- * more constrained, so the conversion below is safe.
- */
- ipi_handler_t handler_const = (ipi_handler_t)handler;
-
- return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
-}
-
-static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
- unsigned int len, unsigned int wait)
-{
- return vpu_ipi_send(fw->pdev, id, buf, len);
-}
-
-static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
- .load_firmware = mtk_vcodec_vpu_load_firmware,
- .get_vdec_capa = mtk_vcodec_vpu_get_vdec_capa,
- .get_venc_capa = mtk_vcodec_vpu_get_venc_capa,
- .map_dm_addr = mtk_vcodec_vpu_map_dm_addr,
- .ipi_register = mtk_vcodec_vpu_set_ipi_register,
- .ipi_send = mtk_vcodec_vpu_ipi_send,
-};
-
-static int mtk_vcodec_scp_load_firmware(struct mtk_vcodec_fw *fw)
-{
- return rproc_boot(scp_get_rproc(fw->scp));
-}
-
-static unsigned int mtk_vcodec_scp_get_vdec_capa(struct mtk_vcodec_fw *fw)
-{
- return scp_get_vdec_hw_capa(fw->scp);
-}
-
-static unsigned int mtk_vcodec_scp_get_venc_capa(struct mtk_vcodec_fw *fw)
-{
- return scp_get_venc_hw_capa(fw->scp);
-}
-
-static void *mtk_vcodec_vpu_scp_dm_addr(struct mtk_vcodec_fw *fw,
- u32 dtcm_dmem_addr)
-{
- return scp_mapping_dm_addr(fw->scp, dtcm_dmem_addr);
-}
-
-static int mtk_vcodec_scp_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
- mtk_vcodec_ipi_handler handler,
- const char *name, void *priv)
-{
- return scp_ipi_register(fw->scp, id, handler, priv);
-}
-
-static int mtk_vcodec_scp_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
- unsigned int len, unsigned int wait)
-{
- return scp_ipi_send(fw->scp, id, buf, len, wait);
-}
-
-static const struct mtk_vcodec_fw_ops mtk_vcodec_rproc_msg = {
- .load_firmware = mtk_vcodec_scp_load_firmware,
- .get_vdec_capa = mtk_vcodec_scp_get_vdec_capa,
- .get_venc_capa = mtk_vcodec_scp_get_venc_capa,
- .map_dm_addr = mtk_vcodec_vpu_scp_dm_addr,
- .ipi_register = mtk_vcodec_scp_set_ipi_register,
- .ipi_send = mtk_vcodec_scp_ipi_send,
-};
-
-static void mtk_vcodec_reset_handler(void *priv)
-{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
-
- mtk_v4l2_err("Watchdog timeout!!");
-
- mutex_lock(&dev->dev_mutex);
- list_for_each_entry(ctx, &dev->ctx_list, list) {
- ctx->state = MTK_STATE_ABORT;
- mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
- ctx->id);
- }
- mutex_unlock(&dev->dev_mutex);
-}
-
struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
enum mtk_vcodec_fw_type type,
- enum rst_id rst_id)
+ enum mtk_vcodec_fw_use fw_use)
{
- const struct mtk_vcodec_fw_ops *ops;
- struct mtk_vcodec_fw *fw;
- struct platform_device *fw_pdev = NULL;
- struct mtk_scp *scp = NULL;
-
switch (type) {
case VPU:
- ops = &mtk_vcodec_vpu_msg;
- fw_pdev = vpu_get_plat_device(dev->plat_dev);
- if (!fw_pdev) {
- mtk_v4l2_err("firmware device is not ready");
- return ERR_PTR(-EINVAL);
- }
- vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_reset_handler,
- dev, rst_id);
- break;
+ return mtk_vcodec_fw_vpu_init(dev, fw_use);
case SCP:
- ops = &mtk_vcodec_rproc_msg;
- scp = scp_get(dev->plat_dev);
- if (!scp) {
- mtk_v4l2_err("could not get vdec scp handle");
- return ERR_PTR(-EPROBE_DEFER);
- }
- break;
+ return mtk_vcodec_fw_scp_init(dev);
default:
mtk_v4l2_err("invalid vcodec fw type");
return ERR_PTR(-EINVAL);
}
-
- fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
- if (!fw)
- return ERR_PTR(-EINVAL);
-
- fw->type = type;
- fw->ops = ops;
- fw->pdev = fw_pdev;
- fw->scp = scp;
-
- return fw;
}
EXPORT_SYMBOL_GPL(mtk_vcodec_fw_select);
void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw)
{
- switch (fw->type) {
- case VPU:
- put_device(&fw->pdev->dev);
- break;
- case SCP:
- scp_put(fw->scp);
- break;
- }
+ fw->ops->release(fw);
}
EXPORT_SYMBOL_GPL(mtk_vcodec_fw_release);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
index fadbbe6..539bb62 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
@@ -15,6 +15,11 @@
SCP,
};
+enum mtk_vcodec_fw_use {
+ DECODER,
+ ENCODER,
+};
+
struct mtk_vcodec_fw;
typedef void (*mtk_vcodec_ipi_handler) (void *data,
@@ -22,7 +27,7 @@
struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
enum mtk_vcodec_fw_type type,
- enum rst_id rst_id);
+ enum mtk_vcodec_fw_use fw_use);
void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw);
int mtk_vcodec_fw_load_firmware(struct mtk_vcodec_fw *fw);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h
new file mode 100644
index 0000000..b41e661
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_priv.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _MTK_VCODEC_FW_PRIV_H_
+#define _MTK_VCODEC_FW_PRIV_H_
+
+#include "mtk_vcodec_fw.h"
+
+struct mtk_vcodec_dev;
+
+struct mtk_vcodec_fw {
+ enum mtk_vcodec_fw_type type;
+ const struct mtk_vcodec_fw_ops *ops;
+ struct platform_device *pdev;
+ struct mtk_scp *scp;
+};
+
+struct mtk_vcodec_fw_ops {
+ int (*load_firmware)(struct mtk_vcodec_fw *fw);
+ unsigned int (*get_vdec_capa)(struct mtk_vcodec_fw *fw);
+ unsigned int (*get_venc_capa)(struct mtk_vcodec_fw *fw);
+ void *(*map_dm_addr)(struct mtk_vcodec_fw *fw, u32 dtcm_dmem_addr);
+ int (*ipi_register)(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler, const char *name,
+ void *priv);
+ int (*ipi_send)(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait);
+ void (*release)(struct mtk_vcodec_fw *fw);
+};
+
+#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU)
+struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_use fw_use);
+#else
+static inline struct mtk_vcodec_fw *
+mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_use fw_use)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_VPU */
+
+#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP)
+struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev);
+#else
+static inline struct mtk_vcodec_fw *
+mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_SCP */
+
+#endif /* _MTK_VCODEC_FW_PRIV_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c
new file mode 100644
index 0000000..d8e66b6
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_scp.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "mtk_vcodec_fw_priv.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_drv.h"
+
+static int mtk_vcodec_scp_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return rproc_boot(scp_get_rproc(fw->scp));
+}
+
+static unsigned int mtk_vcodec_scp_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return scp_get_vdec_hw_capa(fw->scp);
+}
+
+static unsigned int mtk_vcodec_scp_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return scp_get_venc_hw_capa(fw->scp);
+}
+
+static void *mtk_vcodec_vpu_scp_dm_addr(struct mtk_vcodec_fw *fw,
+ u32 dtcm_dmem_addr)
+{
+ return scp_mapping_dm_addr(fw->scp, dtcm_dmem_addr);
+}
+
+static int mtk_vcodec_scp_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ return scp_ipi_register(fw->scp, id, handler, priv);
+}
+
+static int mtk_vcodec_scp_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return scp_ipi_send(fw->scp, id, buf, len, wait);
+}
+
+static void mtk_vcodec_scp_release(struct mtk_vcodec_fw *fw)
+{
+ scp_put(fw->scp);
+}
+
+static const struct mtk_vcodec_fw_ops mtk_vcodec_rproc_msg = {
+ .load_firmware = mtk_vcodec_scp_load_firmware,
+ .get_vdec_capa = mtk_vcodec_scp_get_vdec_capa,
+ .get_venc_capa = mtk_vcodec_scp_get_venc_capa,
+ .map_dm_addr = mtk_vcodec_vpu_scp_dm_addr,
+ .ipi_register = mtk_vcodec_scp_set_ipi_register,
+ .ipi_send = mtk_vcodec_scp_ipi_send,
+ .release = mtk_vcodec_scp_release,
+};
+
+struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+{
+ struct mtk_vcodec_fw *fw;
+ struct mtk_scp *scp;
+
+ scp = scp_get(dev->plat_dev);
+ if (!scp) {
+ mtk_v4l2_err("could not get vdec scp handle");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ fw->type = SCP;
+ fw->ops = &mtk_vcodec_rproc_msg;
+ fw->scp = scp;
+
+ return fw;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
new file mode 100644
index 0000000..cd27f63
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "mtk_vcodec_fw_priv.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_drv.h"
+
+static int mtk_vcodec_vpu_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return vpu_load_firmware(fw->pdev);
+}
+
+static unsigned int mtk_vcodec_vpu_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return vpu_get_vdec_hw_capa(fw->pdev);
+}
+
+static unsigned int mtk_vcodec_vpu_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return vpu_get_venc_hw_capa(fw->pdev);
+}
+
+static void *mtk_vcodec_vpu_map_dm_addr(struct mtk_vcodec_fw *fw,
+ u32 dtcm_dmem_addr)
+{
+ return vpu_mapping_dm_addr(fw->pdev, dtcm_dmem_addr);
+}
+
+static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ /*
+ * The handler we receive takes a void * as its first argument. We
+ * cannot change this because it needs to be passed down to the rproc
+ * subsystem when SCP is used. VPU takes a const argument, which is
+ * more constrained, so the conversion below is safe.
+ */
+ ipi_handler_t handler_const = (ipi_handler_t)handler;
+
+ return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
+}
+
+static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return vpu_ipi_send(fw->pdev, id, buf, len);
+}
+
+static void mtk_vcodec_vpu_release(struct mtk_vcodec_fw *fw)
+{
+ put_device(&fw->pdev->dev);
+}
+
+static void mtk_vcodec_vpu_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_err("Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
+ .load_firmware = mtk_vcodec_vpu_load_firmware,
+ .get_vdec_capa = mtk_vcodec_vpu_get_vdec_capa,
+ .get_venc_capa = mtk_vcodec_vpu_get_venc_capa,
+ .map_dm_addr = mtk_vcodec_vpu_map_dm_addr,
+ .ipi_register = mtk_vcodec_vpu_set_ipi_register,
+ .ipi_send = mtk_vcodec_vpu_ipi_send,
+ .release = mtk_vcodec_vpu_release,
+};
+
+struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_use fw_use)
+{
+ struct platform_device *fw_pdev;
+ struct mtk_vcodec_fw *fw;
+ enum rst_id rst_id;
+
+ switch (fw_use) {
+ case ENCODER:
+ rst_id = VPU_RST_ENC;
+ break;
+ case DECODER:
+ default:
+ rst_id = VPU_RST_DEC;
+ break;
+ }
+
+ fw_pdev = vpu_get_plat_device(dev->plat_dev);
+ if (!fw_pdev) {
+ mtk_v4l2_err("firmware device is not ready");
+ return ERR_PTR(-EINVAL);
+ }
+ vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id);
+
+ fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ fw->type = VPU;
+ fw->ops = &mtk_vcodec_vpu_msg;
+ fw->pdev = fw_pdev;
+
+ return fw;
+}
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 7b79a33..05c9fbd 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -243,8 +243,19 @@
u32 header_mode;
- u32 profile;
- u32 level;
+ struct {
+ u32 h264;
+ u32 mpeg4;
+ u32 hevc;
+ u32 vp8;
+ u32 vp9;
+ } profile;
+ struct {
+ u32 h264;
+ u32 mpeg4;
+ u32 hevc;
+ u32 vp9;
+ } level;
};
struct venus_buffer {
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 57877ea..a9538c2 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -794,7 +794,7 @@
return 0;
opp_dl_add_err:
- dev_pm_domain_detach(core->opp_pmdomain, true);
+ dev_pm_opp_detach_genpd(core->opp_table);
opp_attach_err:
if (core->pd_dl_venus) {
device_link_del(core->pd_dl_venus);
@@ -832,7 +832,7 @@
if (core->opp_dl_venus)
device_link_del(core->opp_dl_venus);
- dev_pm_domain_detach(core->opp_pmdomain, true);
+ dev_pm_opp_detach_genpd(core->opp_table);
}
static int core_get_v4(struct device *dev)
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index f8b1484..47246528 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -537,6 +537,7 @@
struct hfi_quantization quant;
struct hfi_quantization_range quant_range;
u32 ptype, rate_control, bitrate;
+ u32 profile, level;
int ret;
ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
@@ -684,7 +685,35 @@
if (ret)
return ret;
- ret = venus_helper_set_profile_level(inst, ctr->profile, ctr->level);
+ switch (inst->hfi_codec) {
+ case HFI_VIDEO_CODEC_H264:
+ profile = ctr->profile.h264;
+ level = ctr->level.h264;
+ break;
+ case HFI_VIDEO_CODEC_MPEG4:
+ profile = ctr->profile.mpeg4;
+ level = ctr->level.mpeg4;
+ break;
+ case HFI_VIDEO_CODEC_VP8:
+ profile = ctr->profile.vp8;
+ level = 0;
+ break;
+ case HFI_VIDEO_CODEC_VP9:
+ profile = ctr->profile.vp9;
+ level = ctr->level.vp9;
+ break;
+ case HFI_VIDEO_CODEC_HEVC:
+ profile = ctr->profile.hevc;
+ level = ctr->level.hevc;
+ break;
+ case HFI_VIDEO_CODEC_MPEG2:
+ default:
+ profile = 0;
+ level = 0;
+ break;
+ }
+
+ ret = venus_helper_set_profile_level(inst, profile, level);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 0708b3b..cf860e6 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -103,15 +103,25 @@
ctr->h264_entropy_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ ctr->profile.mpeg4 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctr->profile.h264 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ ctr->profile.hevc = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- ctr->profile = ctrl->val;
+ ctr->profile.vp8 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ctr->level.mpeg4 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctr->level.h264 = ctrl->val;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- ctr->level = ctrl->val;
+ ctr->level.hevc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctr->h264_i_qp = ctrl->val;
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 5051a5e..65a136c 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -151,15 +151,12 @@
{
u32 val;
- /* Period of raw software sampling in ns */
- val = DIV_ROUND_CLOSEST(1000000000ul,
- clk_get_rate(ir->bus) / ir->data->div);
-
/*
* Period for software decoder used in the
* unit of raw software sampling
*/
- val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val);
+ val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus),
+ USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE);
dev_dbg(ir->dev, "@pwm clk = \t%lu\n",
clk_get_rate(ir->bus) / ir->data->div);
@@ -412,7 +409,7 @@
mtk_irq_enable(ir, MTK_IRINT_EN);
dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n",
- DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000));
+ MTK_IR_SAMPLE);
return 0;
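
Algebraically, the replacement computes the number of raw samples per software-decoder period in a single step, val ~ clk_rate * MTK_IR_SAMPLE / (USEC_PER_SEC * div), instead of first rounding the per-raw-sample period and then dividing by that rounded value. A worked example with made-up numbers (the clock rate, divider and MTK_IR_SAMPLE value below are hypothetical, chosen only to show the arithmetic):

/*
 * Hypothetical values: bus clock 48 MHz, ir->data->div == 1000,
 * MTK_IR_SAMPLE == 46 (microseconds).
 *
 *   DIV_ROUND_CLOSEST(48000000, 1000000 * 1000 / 46)
 * = DIV_ROUND_CLOSEST(48000000, 21739130)
 * = 2 raw samples per software-decoder period,
 *
 * which matches clk_rate * MTK_IR_SAMPLE / (USEC_PER_SEC * div)
 * = 48000000 * 46 / (1000000 * 1000) ~ 2.2, rounded to nearest.
 */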
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index 74b0549..fc64d0c 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -4,36 +4,43 @@
* validate the existing APIs in the media subsystem. It can also aid
* developers working on userspace applications.
*
- * When this module is loaded, it will attempt to modprobe 'dvb_vidtv_tuner' and 'dvb_vidtv_demod'.
+ * When this module is loaded, it will attempt to modprobe 'dvb_vidtv_tuner'
+ * and 'dvb_vidtv_demod'.
*
* Copyright (C) 2020 Daniel W. S. Almeida
*/
+#include <linux/dev_printk.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/dev_printk.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "vidtv_bridge.h"
-#include "vidtv_demod.h"
-#include "vidtv_tuner.h"
-#include "vidtv_ts.h"
-#include "vidtv_mux.h"
#include "vidtv_common.h"
+#include "vidtv_demod.h"
+#include "vidtv_mux.h"
+#include "vidtv_ts.h"
+#include "vidtv_tuner.h"
-//#define MUX_BUF_MAX_SZ
-//#define MUX_BUF_MIN_SZ
+#define MUX_BUF_MIN_SZ 90164
+#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
#define TUNER_DEFAULT_ADDR 0x68
#define DEMOD_DEFAULT_ADDR 0x60
+#define VIDTV_DEFAULT_NETWORK_ID 0xff44
+#define VIDTV_DEFAULT_NETWORK_NAME "LinuxTV.org"
+#define VIDTV_DEFAULT_TS_ID 0x4081
-/* LNBf fake parameters: ranges used by an Universal (extended) European LNBf */
-#define LNB_CUT_FREQUENCY 11700000
-#define LNB_LOW_FREQ 9750000
-#define LNB_HIGH_FREQ 10600000
-
+/*
+ * The LNBf fake parameters here are the ranges used by a
+ * Universal (extended) European LNBf, which is likely the most common LNBf
+ * found on satellite digital TV systems nowadays.

+ */
+#define LNB_CUT_FREQUENCY 11700000 /* high IF frequency */
+#define LNB_LOW_FREQ 9750000 /* low IF frequency */
+#define LNB_HIGH_FREQ 10600000 /* transition frequency */
static unsigned int drop_tslock_prob_on_low_snr;
module_param(drop_tslock_prob_on_low_snr, uint, 0);
@@ -92,7 +99,8 @@
static unsigned int pcr_period_msec = 40;
module_param(pcr_period_msec, uint, 0);
-MODULE_PARM_DESC(pcr_period_msec, "How often to send PCR packets. Default: 40ms");
+MODULE_PARM_DESC(pcr_period_msec,
+ "How often to send PCR packets. Default: 40ms");
static unsigned int mux_rate_kbytes_sec = 4096;
module_param(mux_rate_kbytes_sec, uint, 0);
@@ -104,16 +112,14 @@
static unsigned int mux_buf_sz_pkts;
module_param(mux_buf_sz_pkts, uint, 0);
-MODULE_PARM_DESC(mux_buf_sz_pkts, "Size for the internal mux buffer in multiples of 188 bytes");
-
-#define MUX_BUF_MIN_SZ 90164
-#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
+MODULE_PARM_DESC(mux_buf_sz_pkts,
+ "Size for the internal mux buffer in multiples of 188 bytes");
static u32 vidtv_bridge_mux_buf_sz_for_mux_rate(void)
{
u32 max_elapsed_time_msecs = VIDTV_MAX_SLEEP_USECS / USEC_PER_MSEC;
- u32 nbytes_expected;
u32 mux_buf_sz = mux_buf_sz_pkts * TS_PACKET_LEN;
+ u32 nbytes_expected;
nbytes_expected = mux_rate_kbytes_sec;
nbytes_expected *= max_elapsed_time_msecs;
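
With the module defaults visible in this patch (mux_rate_kbytes_sec = 4096 above, and VIDTV_MAX_SLEEP_USECS = 2 * VIDTV_SLEEP_USECS = 20000 us in the vidtv_common.h hunk further down), the two statements above evaluate as follows; the remainder of the function falls outside this hunk and is not shown:

/*
 * Worked example with the defaults:
 *
 *   max_elapsed_time_msecs = 20000 / 1000          = 20 ms
 *   nbytes_expected        = 4096 kbytes/s * 20 ms = 81920 bytes
 *
 * i.e. roughly how much mux output can accumulate between two polls
 * at the configured mux rate.
 */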
@@ -143,14 +149,12 @@
FE_HAS_LOCK);
}
-static void
-vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
+/*
+ * called on a separate thread by the mux when new packets become available
+ */
+static void vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
{
- /*
- * called on a separate thread by the mux when new packets become
- * available
- */
- struct vidtv_dvb *dvb = (struct vidtv_dvb *)priv;
+ struct vidtv_dvb *dvb = priv;
/* drop packets if we lose the lock */
if (vidtv_bridge_check_demod_lock(dvb, 0))
@@ -159,7 +163,17 @@
static int vidtv_start_streaming(struct vidtv_dvb *dvb)
{
- struct vidtv_mux_init_args mux_args = {0};
+ struct vidtv_mux_init_args mux_args = {
+ .mux_rate_kbytes_sec = mux_rate_kbytes_sec,
+ .on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail,
+ .pcr_period_usecs = pcr_period_msec * USEC_PER_MSEC,
+ .si_period_usecs = si_period_msec * USEC_PER_MSEC,
+ .pcr_pid = pcr_pid,
+ .transport_stream_id = VIDTV_DEFAULT_TS_ID,
+ .network_id = VIDTV_DEFAULT_NETWORK_ID,
+ .network_name = VIDTV_DEFAULT_NETWORK_NAME,
+ .priv = dvb,
+ };
struct device *dev = &dvb->pdev->dev;
u32 mux_buf_sz;
@@ -168,19 +182,17 @@
return 0;
}
- mux_buf_sz = (mux_buf_sz_pkts) ? mux_buf_sz_pkts : vidtv_bridge_mux_buf_sz_for_mux_rate();
+ if (mux_buf_sz_pkts)
+ mux_buf_sz = mux_buf_sz_pkts;
+ else
+ mux_buf_sz = vidtv_bridge_mux_buf_sz_for_mux_rate();
- mux_args.mux_rate_kbytes_sec = mux_rate_kbytes_sec;
- mux_args.on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail;
- mux_args.mux_buf_sz = mux_buf_sz;
- mux_args.pcr_period_usecs = pcr_period_msec * 1000;
- mux_args.si_period_usecs = si_period_msec * 1000;
- mux_args.pcr_pid = pcr_pid;
- mux_args.transport_stream_id = VIDTV_DEFAULT_TS_ID;
- mux_args.priv = dvb;
+ mux_args.mux_buf_sz = mux_buf_sz;
dvb->streaming = true;
- dvb->mux = vidtv_mux_init(dvb->fe[0], dev, mux_args);
+ dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args);
+ if (!dvb->mux)
+ return -ENOMEM;
vidtv_mux_start_thread(dvb->mux);
dev_dbg_ratelimited(dev, "Started streaming\n");
@@ -204,8 +216,8 @@
{
struct dvb_demux *demux = feed->demux;
struct vidtv_dvb *dvb = demux->priv;
- int rc;
int ret;
+ int rc;
if (!demux->dmx.frontend)
return -EINVAL;
@@ -243,9 +255,9 @@
static struct dvb_frontend *vidtv_get_frontend_ptr(struct i2c_client *c)
{
- /* the demod will set this when its probe function runs */
struct vidtv_demod_state *state = i2c_get_clientdata(c);
+ /* the demod will set this when its probe function runs */
return &state->frontend;
}
@@ -253,6 +265,11 @@
struct i2c_msg msgs[],
int num)
{
+ /*
+ * Right now, this virtual driver doesn't really send or receive
+ * messages from I2C. A real driver will require an implementation
+ * here.
+ */
return 0;
}
@@ -320,11 +337,10 @@
static int vidtv_bridge_probe_demod(struct vidtv_dvb *dvb, u32 n)
{
- struct vidtv_demod_config cfg = {};
-
- cfg.drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr;
- cfg.recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr;
-
+ struct vidtv_demod_config cfg = {
+ .drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr,
+ .recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr,
+ };
dvb->i2c_client_demod[n] = dvb_module_probe("dvb_vidtv_demod",
NULL,
&dvb->i2c_adapter,
@@ -343,14 +359,14 @@
static int vidtv_bridge_probe_tuner(struct vidtv_dvb *dvb, u32 n)
{
- struct vidtv_tuner_config cfg = {};
+ struct vidtv_tuner_config cfg = {
+ .fe = dvb->fe[n],
+ .mock_power_up_delay_msec = mock_power_up_delay_msec,
+ .mock_tune_delay_msec = mock_tune_delay_msec,
+ };
u32 freq;
int i;
- cfg.fe = dvb->fe[n];
- cfg.mock_power_up_delay_msec = mock_power_up_delay_msec;
- cfg.mock_tune_delay_msec = mock_tune_delay_msec;
-
/* TODO: check if the frequencies are within a valid range */
memcpy(cfg.vidtv_valid_dvb_t_freqs,
@@ -389,9 +405,7 @@
static int vidtv_bridge_dvb_init(struct vidtv_dvb *dvb)
{
- int ret;
- int i;
- int j;
+ int ret, i, j;
ret = vidtv_bridge_i2c_register_adap(dvb);
if (ret < 0)
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.h b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
index 78fe847..2528ada 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
@@ -20,9 +20,11 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+
#include <media/dmxdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
+
#include "vidtv_mux.h"
/**
@@ -32,7 +34,7 @@
* @adapter: Represents a DTV adapter. See 'dvb_register_adapter'.
* @demux: The demux used by the dvb_dmx_swfilter_packets() call.
* @dmx_dev: Represents a demux device.
- * @dmx_frontend: The frontends associated with the demux.
+ * @dmx_fe: The frontends associated with the demux.
* @i2c_adapter: The i2c_adapter associated with the bridge driver.
* @i2c_client_demod: The i2c_clients associated with the demodulator modules.
* @i2c_client_tuner: The i2c_clients associated with the tuner modules.
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c
index f2b97cf..7838e62 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_channel.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c
@@ -9,6 +9,7 @@
* When vidtv boots, it will create some hardcoded channels.
* Their services will be concatenated to populate the SDT.
* Their programs will be concatenated to populate the PAT
+ * Their events will be concatenated to populate the EIT
* For each program in the PAT, a PMT section will be created
* The PMT section for a channel will be assigned its streams.
* Every stream will have its corresponding encoder polled to produce TS packets
@@ -18,22 +19,22 @@
* Copyright (C) 2020 Daniel W. S. Almeida
*/
-#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/dev_printk.h>
#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include "vidtv_channel.h"
-#include "vidtv_psi.h"
+#include "vidtv_common.h"
#include "vidtv_encoder.h"
#include "vidtv_mux.h"
-#include "vidtv_common.h"
+#include "vidtv_psi.h"
#include "vidtv_s302m.h"
static void vidtv_channel_encoder_destroy(struct vidtv_encoder *e)
{
- struct vidtv_encoder *curr = e;
struct vidtv_encoder *tmp = NULL;
+ struct vidtv_encoder *curr = e;
while (curr) {
/* forward the call to the derived type */
@@ -44,55 +45,88 @@
}
#define ENCODING_ISO8859_15 "\x0b"
+#define TS_NIT_PID 0x10
+/*
+ * init an audio only channel with a s302m encoder
+ */
struct vidtv_channel
*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id)
{
- /*
- * init an audio only channel with a s302m encoder
- */
+ const __be32 s302m_fid = cpu_to_be32(VIDTV_S302M_FORMAT_IDENTIFIER);
+ char *event_text = ENCODING_ISO8859_15 "Bagatelle No. 25 in A minor for solo piano, also known as F\xfcr Elise, composed by Ludwig van Beethoven";
+ char *event_name = ENCODING_ISO8859_15 "Ludwig van Beethoven: F\xfcr Elise";
+ struct vidtv_s302m_encoder_init_args encoder_args = {};
+ char *iso_language_code = ENCODING_ISO8859_15 "eng";
+ char *provider = ENCODING_ISO8859_15 "LinuxTV.org";
+ char *name = ENCODING_ISO8859_15 "Beethoven";
+ const u16 s302m_es_pid = 0x111; /* packet id for the ES */
+ const u16 s302m_program_pid = 0x101; /* packet id for PMT*/
const u16 s302m_service_id = 0x880;
const u16 s302m_program_num = 0x880;
- const u16 s302m_program_pid = 0x101; /* packet id for PMT*/
- const u16 s302m_es_pid = 0x111; /* packet id for the ES */
- const __be32 s302m_fid = cpu_to_be32(VIDTV_S302M_FORMAT_IDENTIFIER);
+ const u16 s302m_beethoven_event_id = 1;
+ struct vidtv_channel *s302m;
- char *name = ENCODING_ISO8859_15 "Beethoven";
- char *provider = ENCODING_ISO8859_15 "LinuxTV.org";
-
- struct vidtv_channel *s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
- struct vidtv_s302m_encoder_init_args encoder_args = {};
+ s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
+ if (!s302m)
+ return NULL;
s302m->name = kstrdup(name, GFP_KERNEL);
+ if (!s302m->name)
+ goto free_s302m;
- s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id);
+ s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id, false, true);
+ if (!s302m->service)
+ goto free_name;
s302m->service->descriptor = (struct vidtv_psi_desc *)
vidtv_psi_service_desc_init(NULL,
- DIGITAL_TELEVISION_SERVICE,
+ DIGITAL_RADIO_SOUND_SERVICE,
name,
provider);
+ if (!s302m->service->descriptor)
+ goto free_service;
s302m->transport_stream_id = transport_stream_id;
s302m->program = vidtv_psi_pat_program_init(NULL,
s302m_service_id,
s302m_program_pid);
+ if (!s302m->program)
+ goto free_service;
s302m->program_num = s302m_program_num;
s302m->streams = vidtv_psi_pmt_stream_init(NULL,
STREAM_PRIVATE_DATA,
s302m_es_pid);
+ if (!s302m->streams)
+ goto free_program;
s302m->streams->descriptor = (struct vidtv_psi_desc *)
vidtv_psi_registration_desc_init(NULL,
s302m_fid,
NULL,
0);
+ if (!s302m->streams->descriptor)
+ goto free_streams;
+
encoder_args.es_pid = s302m_es_pid;
s302m->encoders = vidtv_s302m_encoder_init(encoder_args);
+ if (!s302m->encoders)
+ goto free_streams;
+
+ s302m->events = vidtv_psi_eit_event_init(NULL, s302m_beethoven_event_id);
+ if (!s302m->events)
+ goto free_encoders;
+ s302m->events->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_short_event_desc_init(NULL,
+ iso_language_code,
+ event_name,
+ event_text);
+ if (!s302m->events->descriptor)
+ goto free_events;
if (head) {
while (head->next)
@@ -102,6 +136,68 @@
}
return s302m;
+
+free_events:
+ vidtv_psi_eit_event_destroy(s302m->events);
+free_encoders:
+ vidtv_s302m_encoder_destroy(s302m->encoders);
+free_streams:
+ vidtv_psi_pmt_stream_destroy(s302m->streams);
+free_program:
+ vidtv_psi_pat_program_destroy(s302m->program);
+free_service:
+ vidtv_psi_sdt_service_destroy(s302m->service);
+free_name:
+ kfree(s302m->name);
+free_s302m:
+ kfree(s302m);
+
+ return NULL;
+}
+
+static struct vidtv_psi_table_eit_event
+*vidtv_channel_eit_event_cat_into_new(struct vidtv_mux *m)
+{
+ /* Concatenate the events */
+ const struct vidtv_channel *cur_chnl = m->channels;
+ struct vidtv_psi_table_eit_event *curr = NULL;
+ struct vidtv_psi_table_eit_event *head = NULL;
+ struct vidtv_psi_table_eit_event *tail = NULL;
+ struct vidtv_psi_desc *desc = NULL;
+ u16 event_id;
+
+ if (!cur_chnl)
+ return NULL;
+
+ while (cur_chnl) {
+ curr = cur_chnl->events;
+
+ if (!curr)
+ dev_warn_ratelimited(m->dev,
+ "No events found for channel %s\n",
+ cur_chnl->name);
+
+ while (curr) {
+ event_id = be16_to_cpu(curr->event_id);
+ tail = vidtv_psi_eit_event_init(tail, event_id);
+ if (!tail) {
+ vidtv_psi_eit_event_destroy(head);
+ return NULL;
+ }
+
+ desc = vidtv_psi_desc_clone(curr->descriptor);
+ vidtv_psi_desc_assign(&tail->descriptor, desc);
+
+ if (!head)
+ head = tail;
+
+ curr = curr->next;
+ }
+
+ cur_chnl = cur_chnl->next;
+ }
+
+ return head;
}
static struct vidtv_psi_table_sdt_service
@@ -125,13 +221,21 @@
if (!curr)
dev_warn_ratelimited(m->dev,
- "No services found for channel %s\n", cur_chnl->name);
+ "No services found for channel %s\n",
+ cur_chnl->name);
while (curr) {
service_id = be16_to_cpu(curr->service_id);
- tail = vidtv_psi_sdt_service_init(tail, service_id);
+ tail = vidtv_psi_sdt_service_init(tail,
+ service_id,
+ curr->EIT_schedule,
+ curr->EIT_present_following);
+ if (!tail)
+ goto free;
desc = vidtv_psi_desc_clone(curr->descriptor);
+ if (!desc)
+ goto free_tail;
vidtv_psi_desc_assign(&tail->descriptor, desc);
if (!head)
@@ -144,6 +248,12 @@
}
return head;
+
+free_tail:
+ vidtv_psi_sdt_service_destroy(tail);
+free:
+ vidtv_psi_sdt_service_destroy(head);
+ return NULL;
}
static struct vidtv_psi_table_pat_program*
@@ -174,6 +284,10 @@
tail = vidtv_psi_pat_program_init(tail,
serv_id,
pid);
+ if (!tail) {
+ vidtv_psi_pat_program_destroy(head);
+ return NULL;
+ }
if (!head)
head = tail;
@@ -183,30 +297,30 @@
cur_chnl = cur_chnl->next;
}
+ /* Add the NIT table */
+ vidtv_psi_pat_program_init(tail, 0, TS_NIT_PID);
return head;
}
+/*
+ * Match channels to their respective PMT sections, then assign the
+ * streams
+ */
static void
vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
struct vidtv_psi_table_pmt **sections,
u32 nsections)
{
- /*
- * Match channels to their respective PMT sections, then assign the
- * streams
- */
struct vidtv_psi_table_pmt *curr_section = NULL;
- struct vidtv_channel *cur_chnl = channels;
-
- struct vidtv_psi_table_pmt_stream *s = NULL;
struct vidtv_psi_table_pmt_stream *head = NULL;
struct vidtv_psi_table_pmt_stream *tail = NULL;
-
+ struct vidtv_psi_table_pmt_stream *s = NULL;
+ struct vidtv_channel *cur_chnl = channels;
struct vidtv_psi_desc *desc = NULL;
- u32 j;
- u16 curr_id;
u16 e_pid; /* elementary stream pid */
+ u16 curr_id;
+ u32 j;
while (cur_chnl) {
for (j = 0; j < nsections; ++j) {
@@ -232,7 +346,8 @@
head = tail;
desc = vidtv_psi_desc_clone(s->descriptor);
- vidtv_psi_desc_assign(&tail->descriptor, desc);
+ vidtv_psi_desc_assign(&tail->descriptor,
+ desc);
s = s->next;
}
@@ -246,17 +361,103 @@
}
}
-void vidtv_channel_si_init(struct vidtv_mux *m)
+static void
+vidtv_channel_destroy_service_list(struct vidtv_psi_desc_service_list_entry *e)
{
+ struct vidtv_psi_desc_service_list_entry *tmp;
+
+ while (e) {
+ tmp = e;
+ e = e->next;
+ kfree(tmp);
+ }
+}
+
+static struct vidtv_psi_desc_service_list_entry
+*vidtv_channel_build_service_list(struct vidtv_psi_table_sdt_service *s)
+{
+ struct vidtv_psi_desc_service_list_entry *curr_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *head_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *prev_e = NULL;
+ struct vidtv_psi_desc *desc = s->descriptor;
+ struct vidtv_psi_desc_service *s_desc;
+
+ while (s) {
+ while (desc) {
+ if (s->descriptor->type != SERVICE_DESCRIPTOR)
+ goto next_desc;
+
+ s_desc = (struct vidtv_psi_desc_service *)desc;
+
+ curr_e = kzalloc(sizeof(*curr_e), GFP_KERNEL);
+ if (!curr_e) {
+ vidtv_channel_destroy_service_list(head_e);
+ return NULL;
+ }
+
+ curr_e->service_id = s->service_id;
+ curr_e->service_type = s_desc->service_type;
+
+ if (!head_e)
+ head_e = curr_e;
+ if (prev_e)
+ prev_e->next = curr_e;
+
+ prev_e = curr_e;
+
+next_desc:
+ desc = desc->next;
+ }
+ s = s->next;
+ }
+ return head_e;
+}
+
+int vidtv_channel_si_init(struct vidtv_mux *m)
+{
+ struct vidtv_psi_desc_service_list_entry *service_list = NULL;
struct vidtv_psi_table_pat_program *programs = NULL;
struct vidtv_psi_table_sdt_service *services = NULL;
+ struct vidtv_psi_table_eit_event *events = NULL;
m->si.pat = vidtv_psi_pat_table_init(m->transport_stream_id);
+ if (!m->si.pat)
+ return -ENOMEM;
- m->si.sdt = vidtv_psi_sdt_table_init(m->transport_stream_id);
+ m->si.sdt = vidtv_psi_sdt_table_init(m->network_id,
+ m->transport_stream_id);
+ if (!m->si.sdt)
+ goto free_pat;
programs = vidtv_channel_pat_prog_cat_into_new(m);
+ if (!programs)
+ goto free_sdt;
services = vidtv_channel_sdt_serv_cat_into_new(m);
+ if (!services)
+ goto free_programs;
+
+ events = vidtv_channel_eit_event_cat_into_new(m);
+ if (!events)
+ goto free_services;
+
+ /* look for a service descriptor for every service */
+ service_list = vidtv_channel_build_service_list(services);
+ if (!service_list)
+ goto free_events;
+
+ /* use these descriptors to build the NIT */
+ m->si.nit = vidtv_psi_nit_table_init(m->network_id,
+ m->transport_stream_id,
+ m->network_name,
+ service_list);
+ if (!m->si.nit)
+ goto free_service_list;
+
+ m->si.eit = vidtv_psi_eit_table_init(m->network_id,
+ m->transport_stream_id,
+ programs->service_id);
+ if (!m->si.eit)
+ goto free_nit;
/* assemble all programs and assign to PAT */
vidtv_psi_pat_program_assign(m->si.pat, programs);
@@ -264,31 +465,65 @@
/* assemble all services and assign to SDT */
vidtv_psi_sdt_service_assign(m->si.sdt, services);
- m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat, m->pcr_pid);
+ /* assemble all events and assign to EIT */
+ vidtv_psi_eit_event_assign(m->si.eit, events);
+
+ m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat,
+ m->pcr_pid);
+ if (!m->si.pmt_secs)
+ goto free_eit;
vidtv_channel_pmt_match_sections(m->channels,
m->si.pmt_secs,
- m->si.pat->programs);
+ m->si.pat->num_pmt);
+
+ vidtv_channel_destroy_service_list(service_list);
+
+ return 0;
+
+free_eit:
+ vidtv_psi_eit_table_destroy(m->si.eit);
+free_nit:
+ vidtv_psi_nit_table_destroy(m->si.nit);
+free_service_list:
+ vidtv_channel_destroy_service_list(service_list);
+free_events:
+ vidtv_psi_eit_event_destroy(events);
+free_services:
+ vidtv_psi_sdt_service_destroy(services);
+free_programs:
+ vidtv_psi_pat_program_destroy(programs);
+free_sdt:
+ vidtv_psi_sdt_table_destroy(m->si.sdt);
+free_pat:
+ vidtv_psi_pat_table_destroy(m->si.pat);
+ return 0;
}
void vidtv_channel_si_destroy(struct vidtv_mux *m)
{
u32 i;
- u16 num_programs = m->si.pat->programs;
+
+ for (i = 0; i < m->si.pat->num_pmt; ++i)
+ vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
vidtv_psi_pat_table_destroy(m->si.pat);
- for (i = 0; i < num_programs; ++i)
- vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
-
kfree(m->si.pmt_secs);
vidtv_psi_sdt_table_destroy(m->si.sdt);
+ vidtv_psi_nit_table_destroy(m->si.nit);
+ vidtv_psi_eit_table_destroy(m->si.eit);
}
-void vidtv_channels_init(struct vidtv_mux *m)
+int vidtv_channels_init(struct vidtv_mux *m)
{
/* this is the place to add new 'channels' for vidtv */
m->channels = vidtv_channel_s302m_init(NULL, m->transport_stream_id);
+
+ if (!m->channels)
+ return -ENOMEM;
+
+ return 0;
}
void vidtv_channels_destroy(struct vidtv_mux *m)
@@ -302,6 +537,7 @@
vidtv_psi_pat_program_destroy(curr->program);
vidtv_psi_pmt_stream_destroy(curr->streams);
vidtv_channel_encoder_destroy(curr->encoders);
+ vidtv_psi_eit_event_destroy(curr->events);
tmp = curr;
curr = curr->next;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.h b/drivers/media/test-drivers/vidtv/vidtv_channel.h
index 2c3cba4..fff2e50 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_channel.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.h
@@ -9,6 +9,7 @@
* When vidtv boots, it will create some hardcoded channels.
* Their services will be concatenated to populate the SDT.
* Their programs will be concatenated to populate the PAT
+ * Their events will be concatenated to populate the EIT
* For each program in the PAT, a PMT section will be created
* The PMT section for a channel will be assigned its streams.
* Every stream will have its corresponding encoder polled to produce TS packets
@@ -22,9 +23,10 @@
#define VIDTV_CHANNEL_H
#include <linux/types.h>
-#include "vidtv_psi.h"
+
#include "vidtv_encoder.h"
#include "vidtv_mux.h"
+#include "vidtv_psi.h"
/**
* struct vidtv_channel - A 'channel' abstraction
@@ -37,6 +39,7 @@
* Every stream will have its corresponding encoder polled to produce TS packets
* These packets may be interleaved by the mux and then delivered to the bridge
*
+ * @name: name of the channel
* @transport_stream_id: a number to identify the TS, chosen at will.
* @service: A _single_ service. Will be concatenated into the SDT.
* @program_num: The link between PAT, PMT and SDT.
@@ -44,6 +47,7 @@
* Will be concatenated into the PAT.
* @streams: A stream loop used to populate the PMT section for 'program'
* @encoders: An encoder loop. There must be one encoder for each stream.
+ * @events: Optional event information. This will feed into the EIT.
* @next: Optionally chain this channel.
*/
struct vidtv_channel {
@@ -54,6 +58,7 @@
struct vidtv_psi_table_pat_program *program;
struct vidtv_psi_table_pmt_stream *streams;
struct vidtv_encoder *encoders;
+ struct vidtv_psi_table_eit_event *events;
struct vidtv_channel *next;
};
@@ -61,14 +66,14 @@
* vidtv_channel_si_init - Init the PSI tables from the channels in the mux
* @m: The mux containing the channels.
*/
-void vidtv_channel_si_init(struct vidtv_mux *m);
+int vidtv_channel_si_init(struct vidtv_mux *m);
void vidtv_channel_si_destroy(struct vidtv_mux *m);
/**
* vidtv_channels_init - Init hardcoded, fake 'channels'.
* @m: The mux to store the channels into.
*/
-void vidtv_channels_init(struct vidtv_mux *m);
+int vidtv_channels_init(struct vidtv_mux *m);
struct vidtv_channel
*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id);
void vidtv_channels_destroy(struct vidtv_mux *m);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_common.h b/drivers/media/test-drivers/vidtv/vidtv_common.h
index 818e7f2..42f63fd 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_common.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_common.h
@@ -16,7 +16,6 @@
#define CLOCK_UNIT_27MHZ 27000000
#define VIDTV_SLEEP_USECS 10000
#define VIDTV_MAX_SLEEP_USECS (2 * VIDTV_SLEEP_USECS)
-#define VIDTV_DEFAULT_TS_ID 0x744
u32 vidtv_memcpy(void *to,
size_t to_offset,
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index eba7fe1..b7823d9 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
+
#include <media/dvb_frontend.h>
#include "vidtv_demod.h"
@@ -192,7 +193,6 @@
c->cnr.stat[0].svalue = state->tuner_cnr;
c->cnr.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);
-
}
static int vidtv_demod_read_status(struct dvb_frontend *fe,
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.h b/drivers/media/test-drivers/vidtv/vidtv_demod.h
index 87651b0..2b84046 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.h
@@ -12,6 +12,7 @@
#define VIDTV_DEMOD_H
#include <linux/dvb/frontend.h>
+
#include <media/dvb_frontend.h>
/**
@@ -19,6 +20,9 @@
* modulation and fec_inner
* @modulation: see enum fe_modulation
* @fec: see enum fe_fec_rate
+ * @cnr_ok: S/N threshold to consider the signal as OK. Below that, there's
+ * a chance of losing sync.
+ * @cnr_good: S/N threshold to consider the signal strong.
*
* This struct matches values for 'good' and 'ok' CNRs given the combination
* of modulation and fec_inner in use. We might simulate some noise if the
@@ -52,13 +56,8 @@
* struct vidtv_demod_state - The demodulator state
* @frontend: The frontend structure allocated by the demod.
* @config: The config used to init the demod.
- * @poll_snr: The task responsible for periodically checking the simulated
- * signal quality, eventually dropping or reacquiring the TS lock.
* @status: the demod status.
- * @cold_start: Whether the demod has not been init yet.
- * @poll_snr_thread_running: Whether the task responsible for periodically
- * checking the simulated signal quality is running.
- * @poll_snr_thread_restart: Whether we should restart the poll_snr task.
+ * @tuner_cnr: current S/N ratio for the signal carrier
*/
struct vidtv_demod_state {
struct dvb_frontend frontend;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_encoder.h b/drivers/media/test-drivers/vidtv/vidtv_encoder.h
index 65d81da..50e3cf4 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_encoder.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_encoder.h
@@ -28,7 +28,7 @@
struct vidtv_access_unit *next;
};
-/* Some musical notes, used by a tone generator */
+/* Some musical notes, used by a tone generator. Values are in Hz */
enum musical_notes {
NOTE_SILENT = 0,
@@ -103,14 +103,16 @@
* @encoder_buf_sz: The encoder buffer size, in bytes
* @encoder_buf_offset: Our byte position in the encoder buffer.
* @sample_count: How many samples we have encoded in total.
+ * @access_units: encoder payload units, used for clock references
* @src_buf: The source of raw data to be encoded, encoder might set a
* default if null.
+ * @src_buf_sz: size of @src_buf.
* @src_buf_offset: Our position in the source buffer.
* @is_video_encoder: Whether this is a video encoder (as opposed to audio)
* @ctx: Encoder-specific state.
* @stream_id: Examples: Audio streams (0xc0-0xdf), Video streams
* (0xe0-0xef).
- * @es_id: The TS PID to use for the elementary stream in this encoder.
+ * @es_pid: The TS PID to use for the elementary stream in this encoder.
* @encode: Prepare enough AUs for the given amount of time.
* @clear: Clear the encoder output.
* @sync: Attempt to synchronize with this encoder.
@@ -131,9 +133,6 @@
u32 encoder_buf_offset;
u64 sample_count;
- int last_duration;
- int note_offset;
- enum musical_notes last_tone;
struct vidtv_access_unit *access_units;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
index 082740a..b51e6a3 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_mux.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
@@ -12,23 +12,23 @@
* Copyright (C) 2020 Daniel W. S. Almeida
*/
-#include <linux/types.h>
-#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
-#include <linux/dev_printk.h>
-#include <linux/ratelimit.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
#include <linux/math64.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
-#include "vidtv_mux.h"
-#include "vidtv_ts.h"
-#include "vidtv_pes.h"
-#include "vidtv_encoder.h"
#include "vidtv_channel.h"
#include "vidtv_common.h"
+#include "vidtv_encoder.h"
+#include "vidtv_mux.h"
+#include "vidtv_pes.h"
#include "vidtv_psi.h"
+#include "vidtv_ts.h"
static struct vidtv_mux_pid_ctx
*vidtv_mux_get_pid_ctx(struct vidtv_mux *m, u16 pid)
@@ -47,33 +47,56 @@
struct vidtv_mux_pid_ctx *ctx;
ctx = vidtv_mux_get_pid_ctx(m, pid);
-
if (ctx)
- goto end;
+ return ctx;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
ctx->pid = pid;
ctx->cc = 0;
hash_add(m->pid_ctx, &ctx->h, pid);
-end:
return ctx;
}
-static void vidtv_mux_pid_ctx_init(struct vidtv_mux *m)
+static void vidtv_mux_pid_ctx_destroy(struct vidtv_mux *m)
+{
+ struct vidtv_mux_pid_ctx *ctx;
+ struct hlist_node *tmp;
+ int bkt;
+
+ hash_for_each_safe(m->pid_ctx, bkt, tmp, ctx, h) {
+ hash_del(&ctx->h);
+ kfree(ctx);
+ }
+}
+
+static int vidtv_mux_pid_ctx_init(struct vidtv_mux *m)
{
struct vidtv_psi_table_pat_program *p = m->si.pat->program;
u16 pid;
hash_init(m->pid_ctx);
/* push the pcr pid ctx */
- vidtv_mux_create_pid_ctx_once(m, m->pcr_pid);
- /* push the null packet pid ctx */
- vidtv_mux_create_pid_ctx_once(m, TS_NULL_PACKET_PID);
+ if (!vidtv_mux_create_pid_ctx_once(m, m->pcr_pid))
+ return -ENOMEM;
+ /* push the NULL packet pid ctx */
+ if (!vidtv_mux_create_pid_ctx_once(m, TS_NULL_PACKET_PID))
+ goto free;
/* push the PAT pid ctx */
- vidtv_mux_create_pid_ctx_once(m, VIDTV_PAT_PID);
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_PAT_PID))
+ goto free;
/* push the SDT pid ctx */
- vidtv_mux_create_pid_ctx_once(m, VIDTV_SDT_PID);
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_SDT_PID))
+ goto free;
+ /* push the NIT pid ctx */
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_NIT_PID))
+ goto free;
+ /* push the EIT pid ctx */
+ if (!vidtv_mux_create_pid_ctx_once(m, VIDTV_EIT_PID))
+ goto free;
/* add a ctx for all PMT sections */
while (p) {
@@ -81,18 +104,12 @@
vidtv_mux_create_pid_ctx_once(m, pid);
p = p->next;
}
-}
-static void vidtv_mux_pid_ctx_destroy(struct vidtv_mux *m)
-{
- int bkt;
- struct vidtv_mux_pid_ctx *ctx;
- struct hlist_node *tmp;
+ return 0;
- hash_for_each_safe(m->pid_ctx, bkt, tmp, ctx, h) {
- hash_del(&ctx->h);
- kfree(ctx);
- }
+free:
+ vidtv_mux_pid_ctx_destroy(m);
+ return -ENOMEM;
}
static void vidtv_mux_update_clk(struct vidtv_mux *m)
@@ -112,32 +129,53 @@
static u32 vidtv_mux_push_si(struct vidtv_mux *m)
{
- u32 initial_offset = m->mux_buf_offset;
+ struct vidtv_psi_pat_write_args pat_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .pat = m->si.pat,
+ };
+ struct vidtv_psi_pmt_write_args pmt_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .pcr_pid = m->pcr_pid,
+ };
+ struct vidtv_psi_sdt_write_args sdt_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .sdt = m->si.sdt,
+ };
+ struct vidtv_psi_nit_write_args nit_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .nit = m->si.nit,
+ };
+ struct vidtv_psi_eit_write_args eit_args = {
+ .buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .eit = m->si.eit,
+ };
+ u32 initial_offset = m->mux_buf_offset;
struct vidtv_mux_pid_ctx *pat_ctx;
struct vidtv_mux_pid_ctx *pmt_ctx;
struct vidtv_mux_pid_ctx *sdt_ctx;
-
- struct vidtv_psi_pat_write_args pat_args = {};
- struct vidtv_psi_pmt_write_args pmt_args = {};
- struct vidtv_psi_sdt_write_args sdt_args = {};
-
- u32 nbytes; /* the number of bytes written by this function */
+ struct vidtv_mux_pid_ctx *nit_ctx;
+ struct vidtv_mux_pid_ctx *eit_ctx;
+ u32 nbytes;
u16 pmt_pid;
u32 i;
pat_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_PAT_PID);
sdt_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_SDT_PID);
+ nit_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_NIT_PID);
+ eit_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_EIT_PID);
- pat_args.buf = m->mux_buf;
pat_args.offset = m->mux_buf_offset;
- pat_args.pat = m->si.pat;
- pat_args.buf_sz = m->mux_buf_sz;
pat_args.continuity_counter = &pat_ctx->cc;
- m->mux_buf_offset += vidtv_psi_pat_write_into(pat_args);
+ m->mux_buf_offset += vidtv_psi_pat_write_into(&pat_args);
- for (i = 0; i < m->si.pat->programs; ++i) {
+ for (i = 0; i < m->si.pat->num_pmt; ++i) {
pmt_pid = vidtv_psi_pmt_get_pid(m->si.pmt_secs[i],
m->si.pat);
@@ -149,25 +187,29 @@
pmt_ctx = vidtv_mux_get_pid_ctx(m, pmt_pid);
- pmt_args.buf = m->mux_buf;
pmt_args.offset = m->mux_buf_offset;
pmt_args.pmt = m->si.pmt_secs[i];
pmt_args.pid = pmt_pid;
- pmt_args.buf_sz = m->mux_buf_sz;
pmt_args.continuity_counter = &pmt_ctx->cc;
- pmt_args.pcr_pid = m->pcr_pid;
/* write each section into buffer */
- m->mux_buf_offset += vidtv_psi_pmt_write_into(pmt_args);
+ m->mux_buf_offset += vidtv_psi_pmt_write_into(&pmt_args);
}
- sdt_args.buf = m->mux_buf;
sdt_args.offset = m->mux_buf_offset;
- sdt_args.sdt = m->si.sdt;
- sdt_args.buf_sz = m->mux_buf_sz;
sdt_args.continuity_counter = &sdt_ctx->cc;
- m->mux_buf_offset += vidtv_psi_sdt_write_into(sdt_args);
+ m->mux_buf_offset += vidtv_psi_sdt_write_into(&sdt_args);
+
+ nit_args.offset = m->mux_buf_offset;
+ nit_args.continuity_counter = &nit_ctx->cc;
+
+ m->mux_buf_offset += vidtv_psi_nit_write_into(&nit_args);
+
+ eit_args.offset = m->mux_buf_offset;
+ eit_args.continuity_counter = &eit_ctx->cc;
+
+ m->mux_buf_offset += vidtv_psi_eit_write_into(&eit_args);
nbytes = m->mux_buf_offset - initial_offset;
@@ -230,23 +272,29 @@
static u32 vidtv_mux_packetize_access_units(struct vidtv_mux *m,
struct vidtv_encoder *e)
{
- u32 nbytes = 0;
-
- struct pes_write_args args = {};
- u32 initial_offset = m->mux_buf_offset;
+ struct pes_write_args args = {
+ .dest_buf = m->mux_buf,
+ .dest_buf_sz = m->mux_buf_sz,
+ .pid = be16_to_cpu(e->es_pid),
+ .encoder_id = e->id,
+ .stream_id = be16_to_cpu(e->stream_id),
+ .send_pts = true, /* forbidden value '01'... */
+ .send_dts = false, /* ...for PTS_DTS flags */
+ };
struct vidtv_access_unit *au = e->access_units;
-
+ u32 initial_offset = m->mux_buf_offset;
+ struct vidtv_mux_pid_ctx *pid_ctx;
+ u32 nbytes = 0;
u8 *buf = NULL;
- struct vidtv_mux_pid_ctx *pid_ctx = vidtv_mux_create_pid_ctx_once(m,
- be16_to_cpu(e->es_pid));
- args.dest_buf = m->mux_buf;
- args.dest_buf_sz = m->mux_buf_sz;
- args.pid = be16_to_cpu(e->es_pid);
- args.encoder_id = e->id;
+ /* see SMPTE 302M clause 6.4 */
+ if (args.encoder_id == S302M) {
+ args.send_dts = false;
+ args.send_pts = true;
+ }
+
+ pid_ctx = vidtv_mux_create_pid_ctx_once(m, be16_to_cpu(e->es_pid));
args.continuity_counter = &pid_ctx->cc;
- args.stream_id = be16_to_cpu(e->stream_id);
- args.send_pts = true;
while (au) {
buf = e->encoder_buf + au->offset;
@@ -256,7 +304,7 @@
args.pts = au->pts;
args.pcr = m->timing.clk;
- m->mux_buf_offset += vidtv_pes_write_into(args);
+ m->mux_buf_offset += vidtv_pes_write_into(&args);
au = au->next;
}
@@ -273,10 +321,10 @@
static u32 vidtv_mux_poll_encoders(struct vidtv_mux *m)
{
- u32 nbytes = 0;
- u32 au_nbytes;
struct vidtv_channel *cur_chnl = m->channels;
struct vidtv_encoder *e = NULL;
+ u32 nbytes = 0;
+ u32 au_nbytes;
while (cur_chnl) {
e = cur_chnl->encoders;
@@ -300,18 +348,19 @@
static u32 vidtv_mux_pad_with_nulls(struct vidtv_mux *m, u32 npkts)
{
- struct null_packet_write_args args = {};
+ struct null_packet_write_args args = {
+ .dest_buf = m->mux_buf,
+ .buf_sz = m->mux_buf_sz,
+ .dest_offset = m->mux_buf_offset,
+ };
u32 initial_offset = m->mux_buf_offset;
- u32 nbytes; /* the number of bytes written by this function */
- u32 i;
struct vidtv_mux_pid_ctx *ctx;
+ u32 nbytes;
+ u32 i;
ctx = vidtv_mux_get_pid_ctx(m, TS_NULL_PACKET_PID);
- args.dest_buf = m->mux_buf;
- args.buf_sz = m->mux_buf_sz;
args.continuity_counter = &ctx->cc;
- args.dest_offset = m->mux_buf_offset;
for (i = 0; i < npkts; ++i) {
m->mux_buf_offset += vidtv_ts_null_write_into(args);
@@ -343,9 +392,9 @@
struct vidtv_mux,
mpeg_thread);
struct dtv_frontend_properties *c = &m->fe->dtv_property_cache;
+ u32 tot_bits = 0;
u32 nbytes;
u32 npkts;
- u32 tot_bits = 0;
while (m->streaming) {
nbytes = 0;
@@ -427,40 +476,62 @@
struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
struct device *dev,
- struct vidtv_mux_init_args args)
+ struct vidtv_mux_init_args *args)
{
- struct vidtv_mux *m = kzalloc(sizeof(*m), GFP_KERNEL);
+ struct vidtv_mux *m;
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return NULL;
m->dev = dev;
m->fe = fe;
- m->timing.pcr_period_usecs = args.pcr_period_usecs;
- m->timing.si_period_usecs = args.si_period_usecs;
+ m->timing.pcr_period_usecs = args->pcr_period_usecs;
+ m->timing.si_period_usecs = args->si_period_usecs;
- m->mux_rate_kbytes_sec = args.mux_rate_kbytes_sec;
+ m->mux_rate_kbytes_sec = args->mux_rate_kbytes_sec;
- m->on_new_packets_available_cb = args.on_new_packets_available_cb;
+ m->on_new_packets_available_cb = args->on_new_packets_available_cb;
- m->mux_buf = vzalloc(args.mux_buf_sz);
- m->mux_buf_sz = args.mux_buf_sz;
+ m->mux_buf = vzalloc(args->mux_buf_sz);
+ if (!m->mux_buf)
+ goto free_mux;
- m->pcr_pid = args.pcr_pid;
- m->transport_stream_id = args.transport_stream_id;
- m->priv = args.priv;
+ m->mux_buf_sz = args->mux_buf_sz;
+
+ m->pcr_pid = args->pcr_pid;
+ m->transport_stream_id = args->transport_stream_id;
+ m->priv = args->priv;
+ m->network_id = args->network_id;
+ m->network_name = kstrdup(args->network_name, GFP_KERNEL);
m->timing.current_jiffies = get_jiffies_64();
- if (args.channels)
- m->channels = args.channels;
+ if (args->channels)
+ m->channels = args->channels;
else
- vidtv_channels_init(m);
+ if (vidtv_channels_init(m) < 0)
+ goto free_mux_buf;
/* will alloc data for pmt_sections after initializing pat */
- vidtv_channel_si_init(m);
+ if (vidtv_channel_si_init(m) < 0)
+ goto free_channels;
INIT_WORK(&m->mpeg_thread, vidtv_mux_tick);
- vidtv_mux_pid_ctx_init(m);
+ if (vidtv_mux_pid_ctx_init(m) < 0)
+ goto free_channel_si;
return m;
+
+free_channel_si:
+ vidtv_channel_si_destroy(m);
+free_channels:
+ vidtv_channels_destroy(m);
+free_mux_buf:
+ vfree(m->mux_buf);
+free_mux:
+ kfree(m);
+ return NULL;
}
void vidtv_mux_destroy(struct vidtv_mux *m)
@@ -469,6 +540,7 @@
vidtv_mux_pid_ctx_destroy(m);
vidtv_channel_si_destroy(m);
vidtv_channels_destroy(m);
+ kfree(m->network_name);
vfree(m->mux_buf);
kfree(m);
}
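
The mux init path above now unwinds partial state with goto labels: each later failure jumps to the label that releases everything set up before it, in reverse order. A minimal userspace sketch of that pattern, with illustrative names rather than the driver's own symbols:

#include <stdlib.h>

/* Sketch of the goto-based unwind used by vidtv_mux_init(): each
 * allocation gets a cleanup label, and a failure jumps to the label
 * that frees everything acquired so far, in reverse order. */
struct demo_ctx {
	void *buf;
	void *table;
};

static struct demo_ctx *demo_ctx_init(size_t buf_sz, size_t table_sz)
{
	struct demo_ctx *c;

	c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;

	c->buf = malloc(buf_sz);
	if (!c->buf)
		goto free_ctx;

	c->table = malloc(table_sz);
	if (!c->table)
		goto free_buf;

	return c;

free_buf:
	free(c->buf);
free_ctx:
	free(c);
	return NULL;
}

int main(void)
{
	struct demo_ctx *c = demo_ctx_init(64, 128);

	if (!c)
		return 1;
	free(c->table);
	free(c->buf);
	free(c);
	return 0;
}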
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.h b/drivers/media/test-drivers/vidtv/vidtv_mux.h
index 2caa606..ad82eb7 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_mux.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.h
@@ -15,9 +15,10 @@
#ifndef VIDTV_MUX_H
#define VIDTV_MUX_H
-#include <linux/types.h>
#include <linux/hashtable.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
+
#include <media/dvb_frontend.h>
#include "vidtv_psi.h"
@@ -58,12 +59,16 @@
* @pat: The PAT in use by the muxer.
* @pmt_secs: The PMT sections in use by the muxer. One for each program in the PAT.
* @sdt: The SDT in use by the muxer.
+ * @nit: The NIT in use by the muxer.
+ * @eit: The EIT in use by the muxer.
*/
struct vidtv_mux_si {
/* the SI tables */
struct vidtv_psi_table_pat *pat;
struct vidtv_psi_table_pmt **pmt_secs; /* the PMT sections */
struct vidtv_psi_table_sdt *sdt;
+ struct vidtv_psi_table_nit *nit;
+ struct vidtv_psi_table_eit *eit;
};
/**
@@ -82,8 +87,10 @@
/**
* struct vidtv_mux - A muxer abstraction loosely based in libavcodec/mpegtsenc.c
- * @mux_rate_kbytes_sec: The bit rate for the TS, in kbytes.
+ * @fe: The frontend structure allocated by the muxer.
+ * @dev: pointer to struct device.
* @timing: Keeps track of timing related information.
+ * @mux_rate_kbytes_sec: The bit rate for the TS, in kbytes.
* @pid_ctx: A hash table to keep track of per-PID metadata.
* @on_new_packets_available_cb: A callback to inform of new TS packets ready.
* @mux_buf: A pointer to a buffer for this muxer. TS packets are stored there
@@ -99,6 +106,8 @@
* @pcr_pid: The TS PID used for the PSI packets. All channels will share the
* same PCR.
* @transport_stream_id: The transport stream ID
+ * @network_id: The network ID
+ * @network_name: The network name
* @priv: Private data.
*/
struct vidtv_mux {
@@ -128,6 +137,8 @@
u16 pcr_pid;
u16 transport_stream_id;
+ u16 network_id;
+ char *network_name;
void *priv;
};
@@ -142,6 +153,8 @@
* same PCR.
* @transport_stream_id: The transport stream ID
* @channels: an optional list of channels to use
+ * @network_id: The network ID
+ * @network_name: The network name
* @priv: Private data.
*/
struct vidtv_mux_init_args {
@@ -153,12 +166,14 @@
u16 pcr_pid;
u16 transport_stream_id;
struct vidtv_channel *channels;
+ u16 network_id;
+ char *network_name;
void *priv;
};
struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
struct device *dev,
- struct vidtv_mux_init_args args);
+ struct vidtv_mux_init_args *args);
void vidtv_mux_destroy(struct vidtv_mux *m);
void vidtv_mux_start_thread(struct vidtv_mux *m);
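
vidtv_mux_init() and the write helpers now take their argument structs by pointer instead of by value, so the growing structs are filled once with designated initializers and only an address is passed around. A small standalone illustration of that calling convention; the struct and names here are hypothetical, not the driver's:

#include <stdio.h>

/* Hypothetical stand-in for a large init-args struct passed by pointer. */
struct demo_init_args {
	unsigned int buf_sz;
	unsigned short pcr_pid;
	const char *network_name;
};

static int demo_init(const struct demo_init_args *args)
{
	printf("buf_sz=%u pcr_pid=0x%x name=%s\n",
	       args->buf_sz, args->pcr_pid, args->network_name);
	return 0;
}

int main(void)
{
	struct demo_init_args args = {
		.buf_sz = 20 * 1024 * 1024,
		.pcr_pid = 0x200,
		.network_name = "example network",
	};

	return demo_init(&args);
}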
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.c b/drivers/media/test-drivers/vidtv/vidtv_pes.c
index 1c75f88..782e5e7f 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_pes.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
-#include <asm/byteorder.h>
#include "vidtv_pes.h"
#include "vidtv_common.h"
@@ -57,7 +56,7 @@
return len;
}
-static u32 vidtv_pes_write_header_stuffing(struct pes_header_write_args args)
+static u32 vidtv_pes_write_header_stuffing(struct pes_header_write_args *args)
{
/*
* This is a fixed 8-bit value equal to '0xFF' that can be inserted
@@ -65,20 +64,20 @@
* It is discarded by the decoder. No more than 32 stuffing bytes shall
* be present in one PES packet header.
*/
- if (args.n_pes_h_s_bytes > PES_HEADER_MAX_STUFFING_BYTES) {
+ if (args->n_pes_h_s_bytes > PES_HEADER_MAX_STUFFING_BYTES) {
pr_warn_ratelimited("More than %d stuffing bytes in PES packet header\n",
PES_HEADER_MAX_STUFFING_BYTES);
- args.n_pes_h_s_bytes = PES_HEADER_MAX_STUFFING_BYTES;
+ args->n_pes_h_s_bytes = PES_HEADER_MAX_STUFFING_BYTES;
}
- return vidtv_memset(args.dest_buf,
- args.dest_offset,
- args.dest_buf_sz,
+ return vidtv_memset(args->dest_buf,
+ args->dest_offset,
+ args->dest_buf_sz,
TS_FILL_BYTE,
- args.n_pes_h_s_bytes);
+ args->n_pes_h_s_bytes);
}
-static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args args)
+static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args *args)
{
u32 nbytes = 0; /* the number of bytes written by this function */
@@ -90,7 +89,7 @@
u64 mask2;
u64 mask3;
- if (!args.send_pts && args.send_dts)
+ if (!args->send_pts && args->send_dts)
return 0;
mask1 = GENMASK_ULL(32, 30);
@@ -98,80 +97,81 @@
mask3 = GENMASK_ULL(14, 0);
/* see ISO/IEC 13818-1 : 2000 p. 32 */
- if (args.send_pts && args.send_dts) {
- pts_dts.pts1 = (0x3 << 4) | ((args.pts & mask1) >> 29) | 0x1;
- pts_dts.pts2 = cpu_to_be16(((args.pts & mask2) >> 14) | 0x1);
- pts_dts.pts3 = cpu_to_be16(((args.pts & mask3) << 1) | 0x1);
+ if (args->send_pts && args->send_dts) {
+ pts_dts.pts1 = (0x3 << 4) | ((args->pts & mask1) >> 29) | 0x1;
+ pts_dts.pts2 = cpu_to_be16(((args->pts & mask2) >> 14) | 0x1);
+ pts_dts.pts3 = cpu_to_be16(((args->pts & mask3) << 1) | 0x1);
- pts_dts.dts1 = (0x1 << 4) | ((args.dts & mask1) >> 29) | 0x1;
- pts_dts.dts2 = cpu_to_be16(((args.dts & mask2) >> 14) | 0x1);
- pts_dts.dts3 = cpu_to_be16(((args.dts & mask3) << 1) | 0x1);
+ pts_dts.dts1 = (0x1 << 4) | ((args->dts & mask1) >> 29) | 0x1;
+ pts_dts.dts2 = cpu_to_be16(((args->dts & mask2) >> 14) | 0x1);
+ pts_dts.dts3 = cpu_to_be16(((args->dts & mask3) << 1) | 0x1);
op = &pts_dts;
op_sz = sizeof(pts_dts);
- } else if (args.send_pts) {
- pts.pts1 = (0x1 << 5) | ((args.pts & mask1) >> 29) | 0x1;
- pts.pts2 = cpu_to_be16(((args.pts & mask2) >> 14) | 0x1);
- pts.pts3 = cpu_to_be16(((args.pts & mask3) << 1) | 0x1);
+ } else if (args->send_pts) {
+ pts.pts1 = (0x1 << 5) | ((args->pts & mask1) >> 29) | 0x1;
+ pts.pts2 = cpu_to_be16(((args->pts & mask2) >> 14) | 0x1);
+ pts.pts3 = cpu_to_be16(((args->pts & mask3) << 1) | 0x1);
op = &pts;
op_sz = sizeof(pts);
}
/* copy PTS/DTS optional */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
op,
op_sz);
return nbytes;
}
-static u32 vidtv_pes_write_h(struct pes_header_write_args args)
+static u32 vidtv_pes_write_h(struct pes_header_write_args *args)
{
u32 nbytes = 0; /* the number of bytes written by this function */
struct vidtv_mpeg_pes pes_header = {};
struct vidtv_pes_optional pes_optional = {};
- struct pes_header_write_args pts_dts_args = args;
- u32 stream_id = (args.encoder_id == S302M) ? PRIVATE_STREAM_1_ID : args.stream_id;
+ struct pes_header_write_args pts_dts_args;
+ u32 stream_id = (args->encoder_id == S302M) ? PRIVATE_STREAM_1_ID : args->stream_id;
u16 pes_opt_bitfield = 0x01 << 15;
pes_header.bitfield = cpu_to_be32((PES_START_CODE_PREFIX << 8) | stream_id);
- pes_header.length = cpu_to_be16(vidtv_pes_op_get_len(args.send_pts,
- args.send_dts) +
- args.access_unit_len);
+ pes_header.length = cpu_to_be16(vidtv_pes_op_get_len(args->send_pts,
+ args->send_dts) +
+ args->access_unit_len);
- if (args.send_pts && args.send_dts)
+ if (args->send_pts && args->send_dts)
pes_opt_bitfield |= (0x3 << 6);
- else if (args.send_pts)
+ else if (args->send_pts)
pes_opt_bitfield |= (0x1 << 7);
pes_optional.bitfield = cpu_to_be16(pes_opt_bitfield);
- pes_optional.length = vidtv_pes_op_get_len(args.send_pts, args.send_dts) +
- args.n_pes_h_s_bytes -
+ pes_optional.length = vidtv_pes_op_get_len(args->send_pts, args->send_dts) +
+ args->n_pes_h_s_bytes -
sizeof(struct vidtv_pes_optional);
/* copy header */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
&pes_header,
sizeof(pes_header));
/* copy optional header bits */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
&pes_optional,
sizeof(pes_optional));
/* copy the timing information */
- pts_dts_args.dest_offset = args.dest_offset + nbytes;
- nbytes += vidtv_pes_write_pts_dts(pts_dts_args);
+ pts_dts_args = *args;
+ pts_dts_args.dest_offset = args->dest_offset + nbytes;
+ nbytes += vidtv_pes_write_pts_dts(&pts_dts_args);
/* write any PES header stuffing */
nbytes += vidtv_pes_write_header_stuffing(args);
@@ -300,14 +300,31 @@
return nbytes;
}
-u32 vidtv_pes_write_into(struct pes_write_args args)
+u32 vidtv_pes_write_into(struct pes_write_args *args)
{
- u32 unaligned_bytes = (args.dest_offset % TS_PACKET_LEN);
- struct pes_ts_header_write_args ts_header_args = {};
- struct pes_header_write_args pes_header_args = {};
- u32 remaining_len = args.access_unit_len;
+ u32 unaligned_bytes = (args->dest_offset % TS_PACKET_LEN);
+ struct pes_ts_header_write_args ts_header_args = {
+ .dest_buf = args->dest_buf,
+ .dest_buf_sz = args->dest_buf_sz,
+ .pid = args->pid,
+ .pcr = args->pcr,
+ .continuity_counter = args->continuity_counter,
+ };
+ struct pes_header_write_args pes_header_args = {
+ .dest_buf = args->dest_buf,
+ .dest_buf_sz = args->dest_buf_sz,
+ .encoder_id = args->encoder_id,
+ .send_pts = args->send_pts,
+ .pts = args->pts,
+ .send_dts = args->send_dts,
+ .dts = args->dts,
+ .stream_id = args->stream_id,
+ .n_pes_h_s_bytes = args->n_pes_h_s_bytes,
+ .access_unit_len = args->access_unit_len,
+ };
+ u32 remaining_len = args->access_unit_len;
bool wrote_pes_header = false;
- u64 last_pcr = args.pcr;
+ u64 last_pcr = args->pcr;
bool need_pcr = true;
u32 available_space;
u32 payload_size;
@@ -318,25 +335,13 @@
pr_warn_ratelimited("buffer is misaligned, while starting PES\n");
/* forcibly align and hope for the best */
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
TS_FILL_BYTE,
TS_PACKET_LEN - unaligned_bytes);
}
- if (args.send_dts && !args.send_pts) {
- pr_warn_ratelimited("forbidden value '01' for PTS_DTS flags\n");
- args.send_pts = true;
- args.pts = args.dts;
- }
-
- /* see SMPTE 302M clause 6.4 */
- if (args.encoder_id == S302M) {
- args.send_dts = false;
- args.send_pts = true;
- }
-
while (remaining_len) {
available_space = TS_PAYLOAD_LEN;
/*
@@ -345,14 +350,14 @@
* the space needed for the TS header _and_ for the PES header
*/
if (!wrote_pes_header)
- available_space -= vidtv_pes_h_get_len(args.send_pts,
- args.send_dts);
+ available_space -= vidtv_pes_h_get_len(args->send_pts,
+ args->send_dts);
/*
* if the encoder has inserted stuffing bytes in the PES
* header, account for them.
*/
- available_space -= args.n_pes_h_s_bytes;
+ available_space -= args->n_pes_h_s_bytes;
/* Take the extra adaptation into account if need to send PCR */
if (need_pcr) {
@@ -387,14 +392,9 @@
}
/* write ts header */
- ts_header_args.dest_buf = args.dest_buf;
- ts_header_args.dest_offset = args.dest_offset + nbytes;
- ts_header_args.dest_buf_sz = args.dest_buf_sz;
- ts_header_args.pid = args.pid;
- ts_header_args.pcr = args.pcr;
- ts_header_args.continuity_counter = args.continuity_counter;
- ts_header_args.wrote_pes_header = wrote_pes_header;
- ts_header_args.n_stuffing_bytes = stuff_bytes;
+ ts_header_args.dest_offset = args->dest_offset + nbytes;
+ ts_header_args.wrote_pes_header = wrote_pes_header;
+ ts_header_args.n_stuffing_bytes = stuff_bytes;
nbytes += vidtv_pes_write_ts_h(ts_header_args, need_pcr,
&last_pcr);
@@ -403,33 +403,20 @@
if (!wrote_pes_header) {
/* write the PES header only once */
- pes_header_args.dest_buf = args.dest_buf;
-
- pes_header_args.dest_offset = args.dest_offset +
- nbytes;
-
- pes_header_args.dest_buf_sz = args.dest_buf_sz;
- pes_header_args.encoder_id = args.encoder_id;
- pes_header_args.send_pts = args.send_pts;
- pes_header_args.pts = args.pts;
- pes_header_args.send_dts = args.send_dts;
- pes_header_args.dts = args.dts;
- pes_header_args.stream_id = args.stream_id;
- pes_header_args.n_pes_h_s_bytes = args.n_pes_h_s_bytes;
- pes_header_args.access_unit_len = args.access_unit_len;
-
- nbytes += vidtv_pes_write_h(pes_header_args);
- wrote_pes_header = true;
+ pes_header_args.dest_offset = args->dest_offset +
+ nbytes;
+ nbytes += vidtv_pes_write_h(&pes_header_args);
+ wrote_pes_header = true;
}
/* write as much of the payload as we possibly can */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
- args.from,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
+ args->from,
payload_size);
- args.from += payload_size;
+ args->from += payload_size;
remaining_len -= payload_size;
}
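
vidtv_pes_write_pts_dts() above splits the 33-bit PTS into three chunks (bits 32:30, 29:15 and 14:0, matching the GENMASK_ULL masks) and interleaves the '0010' prefix and marker bits required by ISO/IEC 13818-1. A standalone userspace sketch of the same 5-byte, PTS-only packing, byte by byte:

#include <stdint.h>
#include <stdio.h>

/* Pack a 33-bit PTS into the 5-byte PES field: '0010' prefix, the three
 * PTS chunks (bits 32:30, 29:15, 14:0) and a marker bit after each chunk,
 * as laid out in ISO/IEC 13818-1. */
static void pack_pts(uint64_t pts, uint8_t out[5])
{
	out[0] = 0x21 | ((pts >> 29) & 0x0e);	/* '0010' | PTS[32:30] | marker */
	out[1] = (pts >> 22) & 0xff;		/* PTS[29:22] */
	out[2] = 0x01 | ((pts >> 14) & 0xfe);	/* PTS[21:15] | marker */
	out[3] = (pts >> 7) & 0xff;		/* PTS[14:7] */
	out[4] = 0x01 | ((pts << 1) & 0xfe);	/* PTS[6:0] | marker */
}

int main(void)
{
	uint8_t buf[5];
	int i;

	pack_pts(90000, buf);	/* 1 second at the 90 kHz PTS clock */
	for (i = 0; i < 5; i++)
		printf("%02x ", buf[i]);
	printf("\n");		/* prints: 21 00 05 bf 21 */
	return 0;
}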
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.h b/drivers/media/test-drivers/vidtv/vidtv_pes.h
index 0ea9e86..963c591 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_pes.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.h
@@ -14,7 +14,6 @@
#ifndef VIDTV_PES_H
#define VIDTV_PES_H
-#include <asm/byteorder.h>
#include <linux/types.h>
#include "vidtv_common.h"
@@ -114,8 +113,10 @@
* @dest_buf_sz: The size of the dest_buffer
* @pid: The PID to use for the TS packets.
* @continuity_counter: Incremented on every new TS packet.
- * @n_pes_h_s_bytes: Padding bytes. Might be used by an encoder if needed, gets
+ * @wrote_pes_header: Flag to indicate that the PES header was written
+ * @n_stuffing_bytes: Padding bytes. Might be used by an encoder if needed, gets
* discarded by the decoder.
+ * @pcr: counter driven by a 27MHz clock.
*/
struct pes_ts_header_write_args {
void *dest_buf;
@@ -146,6 +147,7 @@
* @dts: DTS value to send.
* @n_pes_h_s_bytes: Padding bytes. Might be used by an encoder if needed, gets
* discarded by the decoder.
+ * @pcr: counter driven by a 27MHz clock.
*/
struct pes_write_args {
void *dest_buf;
@@ -186,6 +188,6 @@
* equal to the size of the access unit, since we need space for PES headers, TS headers
* and padding bytes, if any.
*/
-u32 vidtv_pes_write_into(struct pes_write_args args);
+u32 vidtv_pes_write_into(struct pes_write_args *args);
#endif // VIDTV_PES_H
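
Both the PES writer above and the PSI writer below chop their payloads into 188-byte TS packets: every packet spends 4 bytes on the TS header, leaving 184 payload bytes, and a packet that starts a new PSI section also spends one payload byte on the pointer_field. A rough standalone helper for the resulting packet count; it deliberately ignores the adaptation-field and CRC handling the driver also performs:

#include <stdio.h>

#define TS_PACKET_LEN	188
#define TS_HEADER_LEN	4
#define TS_PAYLOAD_LEN	(TS_PACKET_LEN - TS_HEADER_LEN)	/* 184 */

/* Number of TS packets needed to carry 'len' bytes of section payload,
 * counting the 1-byte pointer_field a new PSI section carries. */
static unsigned int ts_packets_for_section(unsigned int len)
{
	unsigned int total = len + 1;	/* pointer_field */

	return (total + TS_PAYLOAD_LEN - 1) / TS_PAYLOAD_LEN;
}

int main(void)
{
	/* e.g. a 1000-byte section needs 6 packets, 1128 bytes on the wire */
	printf("%u\n", ts_packets_for_section(1000));
	return 0;
}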
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
index 82cf67d..4511a2a 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
@@ -6,31 +6,31 @@
* technically be broken into one or more sections, we do not do this here,
* hence 'table' and 'section' are interchangeable for vidtv.
*
- * This code currently supports three tables: PAT, PMT and SDT. These are the
- * bare minimum to get userspace to recognize our MPEG transport stream. It can
- * be extended to support more PSI tables in the future.
- *
* Copyright (C) 2020 Daniel W. S. Almeida
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
+#include <linux/bcd.h>
#include <linux/crc32.h>
-#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#include <asm/byteorder.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/types.h>
-#include "vidtv_psi.h"
#include "vidtv_common.h"
+#include "vidtv_psi.h"
#include "vidtv_ts.h"
#define CRC_SIZE_IN_BYTES 4
#define MAX_VERSION_NUM 32
+#define INITIAL_CRC 0xffffffff
+#define ISO_LANGUAGE_CODE_LEN 3
static const u32 CRC_LUT[256] = {
/* from libdvbv5 */
@@ -79,7 +79,7 @@
0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
};
-static inline u32 dvb_crc32(u32 crc, u8 *data, u32 len)
+static u32 dvb_crc32(u32 crc, u8 *data, u32 len)
{
/* from libdvbv5 */
while (len--)
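
dvb_crc32() folds each byte through the libdvbv5 lookup table; the checksum it produces is CRC-32/MPEG-2 (polynomial 0x04c11db7, initial value 0xffffffff, no bit reflection, no final XOR; the INITIAL_CRC define above seeds that same 0xffffffff). The same value can be computed bit by bit, as in this standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-32/MPEG-2: the same checksum dvb_crc32() computes via its
 * lookup table, one bit at a time. */
static uint32_t crc32_mpeg2(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xffffffff;
	int i;

	while (len--) {
		crc ^= (uint32_t)*data++ << 24;
		for (i = 0; i < 8; i++)
			crc = (crc & 0x80000000) ?
			      (crc << 1) ^ 0x04c11db7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	const char *s = "123456789";

	/* Known check value for CRC-32/MPEG-2: 0x0376e6e7 */
	printf("%08x\n", crc32_mpeg2((const uint8_t *)s, strlen(s)));
	return 0;
}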
@@ -92,40 +92,7 @@
h->version++;
}
-static inline u16 vidtv_psi_sdt_serv_get_desc_loop_len(struct vidtv_psi_table_sdt_service *s)
-{
- u16 mask;
- u16 ret;
-
- mask = GENMASK(11, 0);
-
- ret = be16_to_cpu(s->bitfield) & mask;
- return ret;
-}
-
-static inline u16 vidtv_psi_pmt_stream_get_desc_loop_len(struct vidtv_psi_table_pmt_stream *s)
-{
- u16 mask;
- u16 ret;
-
- mask = GENMASK(9, 0);
-
- ret = be16_to_cpu(s->bitfield2) & mask;
- return ret;
-}
-
-static inline u16 vidtv_psi_pmt_get_desc_loop_len(struct vidtv_psi_table_pmt *p)
-{
- u16 mask;
- u16 ret;
-
- mask = GENMASK(9, 0);
-
- ret = be16_to_cpu(p->bitfield2) & mask;
- return ret;
-}
-
-static inline u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
+static u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
{
u16 mask;
u16 ret;
@@ -136,7 +103,7 @@
return ret;
}
-inline u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
+u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
{
u16 mask;
u16 ret;
@@ -147,7 +114,7 @@
return ret;
}
-inline u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s)
+u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s)
{
u16 mask;
u16 ret;
@@ -158,10 +125,11 @@
return ret;
}
-static inline void vidtv_psi_set_desc_loop_len(__be16 *bitfield, u16 new_len, u8 desc_len_nbits)
+static void vidtv_psi_set_desc_loop_len(__be16 *bitfield, u16 new_len,
+ u8 desc_len_nbits)
{
- u16 mask;
__be16 new;
+ u16 mask;
mask = GENMASK(15, desc_len_nbits);
@@ -188,90 +156,81 @@
h->bitfield = new;
}
-static u32 vidtv_psi_ts_psi_write_into(struct psi_write_args args)
+/*
+ * Packetize PSI sections into TS packets:
+ * push a TS header (4bytes) every 184 bytes
+ * manage the continuity_counter
+ * add stuffing (i.e. padding bytes) after the CRC
+ */
+static u32 vidtv_psi_ts_psi_write_into(struct psi_write_args *args)
{
- /*
- * Packetize PSI sections into TS packets:
- * push a TS header (4bytes) every 184 bytes
- * manage the continuity_counter
- * add stuffing (i.e. padding bytes) after the CRC
- */
-
- u32 nbytes_past_boundary = (args.dest_offset % TS_PACKET_LEN);
+ struct vidtv_mpeg_ts ts_header = {
+ .sync_byte = TS_SYNC_BYTE,
+ .bitfield = cpu_to_be16((args->new_psi_section << 14) | args->pid),
+ .scrambling = 0,
+ .payload = 1,
+ .adaptation_field = 0, /* no adaptation field */
+ };
+ u32 nbytes_past_boundary = (args->dest_offset % TS_PACKET_LEN);
bool aligned = (nbytes_past_boundary == 0);
- struct vidtv_mpeg_ts ts_header = {};
-
- /* number of bytes written by this function */
- u32 nbytes = 0;
- /* how much there is left to write */
- u32 remaining_len = args.len;
- /* how much we can be written in this packet */
+ u32 remaining_len = args->len;
u32 payload_write_len = 0;
- /* where we are in the source */
u32 payload_offset = 0;
+ u32 nbytes = 0;
- const u16 PAYLOAD_START = args.new_psi_section;
-
- if (!args.crc && !args.is_crc)
+ if (!args->crc && !args->is_crc)
pr_warn_ratelimited("Missing CRC for chunk\n");
- if (args.crc)
- *args.crc = dvb_crc32(*args.crc, args.from, args.len);
+ if (args->crc)
+ *args->crc = dvb_crc32(*args->crc, args->from, args->len);
- if (args.new_psi_section && !aligned) {
+ if (args->new_psi_section && !aligned) {
pr_warn_ratelimited("Cannot write a new PSI section in a misaligned buffer\n");
/* forcibly align and hope for the best */
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
TS_FILL_BYTE,
TS_PACKET_LEN - nbytes_past_boundary);
}
while (remaining_len) {
- nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ nbytes_past_boundary = (args->dest_offset + nbytes) % TS_PACKET_LEN;
aligned = (nbytes_past_boundary == 0);
if (aligned) {
/* if at a packet boundary, write a new TS header */
- ts_header.sync_byte = TS_SYNC_BYTE;
- ts_header.bitfield = cpu_to_be16((PAYLOAD_START << 14) | args.pid);
- ts_header.scrambling = 0;
- ts_header.continuity_counter = *args.continuity_counter;
- ts_header.payload = 1;
- /* no adaptation field */
- ts_header.adaptation_field = 0;
+ ts_header.continuity_counter = *args->continuity_counter;
- /* copy the header */
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
&ts_header,
sizeof(ts_header));
/*
* This will trigger a discontinuity if the buffer is full,
* effectively dropping the packet.
*/
- vidtv_ts_inc_cc(args.continuity_counter);
+ vidtv_ts_inc_cc(args->continuity_counter);
}
/* write the pointer_field in the first byte of the payload */
- if (args.new_psi_section)
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ if (args->new_psi_section)
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
0x0,
1);
/* write as much of the payload as possible */
- nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ nbytes_past_boundary = (args->dest_offset + nbytes) % TS_PACKET_LEN;
payload_write_len = min(TS_PACKET_LEN - nbytes_past_boundary, remaining_len);
- nbytes += vidtv_memcpy(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
- args.from + payload_offset,
+ nbytes += vidtv_memcpy(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
+ args->from + payload_offset,
payload_write_len);
/* 'payload_write_len' written from a total of 'len' requested*/
@@ -283,37 +242,45 @@
* fill the rest of the packet if there is any remaining space unused
*/
- nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ nbytes_past_boundary = (args->dest_offset + nbytes) % TS_PACKET_LEN;
- if (args.is_crc)
- nbytes += vidtv_memset(args.dest_buf,
- args.dest_offset + nbytes,
- args.dest_buf_sz,
+ if (args->is_crc)
+ nbytes += vidtv_memset(args->dest_buf,
+ args->dest_offset + nbytes,
+ args->dest_buf_sz,
TS_FILL_BYTE,
TS_PACKET_LEN - nbytes_past_boundary);
return nbytes;
}
-static u32 table_section_crc32_write_into(struct crc32_write_args args)
+static u32 table_section_crc32_write_into(struct crc32_write_args *args)
{
+ struct psi_write_args psi_args = {
+ .dest_buf = args->dest_buf,
+ .from = &args->crc,
+ .len = CRC_SIZE_IN_BYTES,
+ .dest_offset = args->dest_offset,
+ .pid = args->pid,
+ .new_psi_section = false,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = true,
+ .dest_buf_sz = args->dest_buf_sz,
+ };
+
/* the CRC is the last entry in the section */
- u32 nbytes = 0;
- struct psi_write_args psi_args = {};
- psi_args.dest_buf = args.dest_buf;
- psi_args.from = &args.crc;
- psi_args.len = CRC_SIZE_IN_BYTES;
- psi_args.dest_offset = args.dest_offset;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = true;
- psi_args.dest_buf_sz = args.dest_buf_sz;
+ return vidtv_psi_ts_psi_write_into(&psi_args);
+}
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+static void vidtv_psi_desc_chain(struct vidtv_psi_desc *head, struct vidtv_psi_desc *desc)
+{
+ if (head) {
+ while (head->next)
+ head = head->next;
- return nbytes;
+ head->next = desc;
+ }
}
struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc *head,
@@ -326,6 +293,8 @@
u32 provider_name_len = provider_name ? strlen(provider_name) : 0;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
desc->type = SERVICE_DESCRIPTOR;
@@ -347,12 +316,7 @@
if (provider_name && provider_name_len)
desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
- if (head) {
- while (head->next)
- head = head->next;
-
- head->next = (struct vidtv_psi_desc *)desc;
- }
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
}
@@ -365,6 +329,8 @@
struct vidtv_psi_desc_registration *desc;
desc = kzalloc(sizeof(*desc) + sizeof(format_id) + additional_info_len, GFP_KERNEL);
+ if (!desc)
+ return NULL;
desc->type = REGISTRATION_DESCRIPTOR;
@@ -378,44 +344,178 @@
additional_ident_info,
additional_info_len);
- if (head) {
- while (head->next)
- head = head->next;
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+}
- head->next = (struct vidtv_psi_desc *)desc;
+struct vidtv_psi_desc_network_name
+*vidtv_psi_network_name_desc_init(struct vidtv_psi_desc *head, char *network_name)
+{
+ u32 network_name_len = network_name ? strlen(network_name) : 0;
+ struct vidtv_psi_desc_network_name *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->type = NETWORK_NAME_DESCRIPTOR;
+
+ desc->length = network_name_len;
+
+ if (network_name && network_name_len)
+ desc->network_name = kstrdup(network_name, GFP_KERNEL);
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+}
+
+struct vidtv_psi_desc_service_list
+*vidtv_psi_service_list_desc_init(struct vidtv_psi_desc *head,
+ struct vidtv_psi_desc_service_list_entry *entry)
+{
+ struct vidtv_psi_desc_service_list_entry *curr_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *head_e = NULL;
+ struct vidtv_psi_desc_service_list_entry *prev_e = NULL;
+ struct vidtv_psi_desc_service_list *desc;
+ u16 length = 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->type = SERVICE_LIST_DESCRIPTOR;
+
+ while (entry) {
+ curr_e = kzalloc(sizeof(*curr_e), GFP_KERNEL);
+ if (!curr_e) {
+ while (head_e) {
+ curr_e = head_e;
+ head_e = head_e->next;
+ kfree(curr_e);
+ }
+ kfree(desc);
+ return NULL;
+ }
+
+ curr_e->service_id = entry->service_id;
+ curr_e->service_type = entry->service_type;
+
+ length += sizeof(struct vidtv_psi_desc_service_list_entry) -
+ sizeof(struct vidtv_psi_desc_service_list_entry *);
+
+ if (!head_e)
+ head_e = curr_e;
+ if (prev_e)
+ prev_e->next = curr_e;
+
+ prev_e = curr_e;
+ entry = entry->next;
}
+ desc->length = length;
+ desc->service_list = head_e;
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
+ return desc;
+}
+
+struct vidtv_psi_desc_short_event
+*vidtv_psi_short_event_desc_init(struct vidtv_psi_desc *head,
+ char *iso_language_code,
+ char *event_name,
+ char *text)
+{
+ u32 iso_len = iso_language_code ? strlen(iso_language_code) : 0;
+ u32 event_name_len = event_name ? strlen(event_name) : 0;
+ struct vidtv_psi_desc_short_event *desc;
+ u32 text_len = text ? strlen(text) : 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->type = SHORT_EVENT_DESCRIPTOR;
+
+ desc->length = ISO_LANGUAGE_CODE_LEN +
+ sizeof_field(struct vidtv_psi_desc_short_event, event_name_len) +
+ event_name_len +
+ sizeof_field(struct vidtv_psi_desc_short_event, text_len) +
+ text_len;
+
+ desc->event_name_len = event_name_len;
+ desc->text_len = text_len;
+
+ if (iso_len != ISO_LANGUAGE_CODE_LEN)
+ iso_language_code = "eng";
+
+ desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
+
+ if (event_name && event_name_len)
+ desc->event_name = kstrdup(event_name, GFP_KERNEL);
+
+ if (text && text_len)
+ desc->text = kstrdup(text, GFP_KERNEL);
+
+ vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
return desc;
}
struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
{
+ struct vidtv_psi_desc_network_name *desc_network_name;
+ struct vidtv_psi_desc_service_list *desc_service_list;
+ struct vidtv_psi_desc_short_event *desc_short_event;
+ struct vidtv_psi_desc_service *service;
struct vidtv_psi_desc *head = NULL;
struct vidtv_psi_desc *prev = NULL;
struct vidtv_psi_desc *curr = NULL;
- struct vidtv_psi_desc_service *service;
-
while (desc) {
switch (desc->type) {
case SERVICE_DESCRIPTOR:
service = (struct vidtv_psi_desc_service *)desc;
curr = (struct vidtv_psi_desc *)
- vidtv_psi_service_desc_init(head,
- service->service_type,
- service->service_name,
- service->provider_name);
+ vidtv_psi_service_desc_init(head,
+ service->service_type,
+ service->service_name,
+ service->provider_name);
+ break;
+
+ case NETWORK_NAME_DESCRIPTOR:
+ desc_network_name = (struct vidtv_psi_desc_network_name *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_network_name_desc_init(head,
+ desc_network_name->network_name);
+ break;
+
+ case SERVICE_LIST_DESCRIPTOR:
+ desc_service_list = (struct vidtv_psi_desc_service_list *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_service_list_desc_init(head,
+ desc_service_list->service_list);
+ break;
+
+ case SHORT_EVENT_DESCRIPTOR:
+ desc_short_event = (struct vidtv_psi_desc_short_event *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_short_event_desc_init(head,
+ desc_short_event->iso_language_code,
+ desc_short_event->event_name,
+ desc_short_event->text);
break;
case REGISTRATION_DESCRIPTOR:
default:
curr = kzalloc(sizeof(*desc) + desc->length, GFP_KERNEL);
+ if (!curr)
+ return NULL;
memcpy(curr, desc, sizeof(*desc) + desc->length);
- break;
- }
+ }
- if (curr)
- curr->next = NULL;
+ if (!curr)
+ return NULL;
+
+ curr->next = NULL;
if (!head)
head = curr;
if (prev)
@@ -430,6 +530,8 @@
void vidtv_psi_desc_destroy(struct vidtv_psi_desc *desc)
{
+ struct vidtv_psi_desc_service_list_entry *sl_entry_tmp = NULL;
+ struct vidtv_psi_desc_service_list_entry *sl_entry = NULL;
struct vidtv_psi_desc *curr = desc;
struct vidtv_psi_desc *tmp = NULL;
@@ -447,6 +549,25 @@
/* nothing to do */
break;
+ case NETWORK_NAME_DESCRIPTOR:
+ kfree(((struct vidtv_psi_desc_network_name *)tmp)->network_name);
+ break;
+
+ case SERVICE_LIST_DESCRIPTOR:
+ sl_entry = ((struct vidtv_psi_desc_service_list *)tmp)->service_list;
+ while (sl_entry) {
+ sl_entry_tmp = sl_entry;
+ sl_entry = sl_entry->next;
+ kfree(sl_entry_tmp);
+ }
+ break;
+
+ case SHORT_EVENT_DESCRIPTOR:
+ kfree(((struct vidtv_psi_desc_short_event *)tmp)->iso_language_code);
+ kfree(((struct vidtv_psi_desc_short_event *)tmp)->event_name);
+ kfree(((struct vidtv_psi_desc_short_event *)tmp)->text);
+ break;
+
default:
pr_warn_ratelimited("Possible leak: not handling descriptor type %d\n",
tmp->type);
@@ -513,63 +634,119 @@
vidtv_psi_update_version_num(&sdt->header);
}
-static u32 vidtv_psi_desc_write_into(struct desc_write_args args)
+static u32 vidtv_psi_desc_write_into(struct desc_write_args *args)
{
- /* the number of bytes written by this function */
+ struct psi_write_args psi_args = {
+ .dest_buf = args->dest_buf,
+ .from = &args->desc->type,
+ .pid = args->pid,
+ .new_psi_section = false,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = false,
+ .dest_buf_sz = args->dest_buf_sz,
+ .crc = args->crc,
+ .len = sizeof_field(struct vidtv_psi_desc, type) +
+ sizeof_field(struct vidtv_psi_desc, length),
+ };
+ struct vidtv_psi_desc_service_list_entry *serv_list_entry = NULL;
u32 nbytes = 0;
- struct psi_write_args psi_args = {};
- psi_args.dest_buf = args.dest_buf;
- psi_args.from = &args.desc->type;
+ psi_args.dest_offset = args->dest_offset + nbytes;
- psi_args.len = sizeof_field(struct vidtv_psi_desc, type) +
- sizeof_field(struct vidtv_psi_desc, length);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.dest_buf_sz;
- psi_args.crc = args.crc;
-
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
-
- switch (args.desc->type) {
+ switch (args->desc->type) {
case SERVICE_DESCRIPTOR:
- psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.dest_offset = args->dest_offset + nbytes;
psi_args.len = sizeof_field(struct vidtv_psi_desc_service, service_type) +
sizeof_field(struct vidtv_psi_desc_service, provider_name_len);
- psi_args.from = &((struct vidtv_psi_desc_service *)args.desc)->service_type;
+ psi_args.from = &((struct vidtv_psi_desc_service *)args->desc)->service_type;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.len = ((struct vidtv_psi_desc_service *)args.desc)->provider_name_len;
- psi_args.from = ((struct vidtv_psi_desc_service *)args.desc)->provider_name;
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_service *)args->desc)->provider_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_service *)args->desc)->provider_name;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.dest_offset = args->dest_offset + nbytes;
psi_args.len = sizeof_field(struct vidtv_psi_desc_service, service_name_len);
- psi_args.from = &((struct vidtv_psi_desc_service *)args.desc)->service_name_len;
+ psi_args.from = &((struct vidtv_psi_desc_service *)args->desc)->service_name_len;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.len = ((struct vidtv_psi_desc_service *)args.desc)->service_name_len;
- psi_args.from = ((struct vidtv_psi_desc_service *)args.desc)->service_name;
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_service *)args->desc)->service_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_service *)args->desc)->service_name;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+ break;
+
+ case NETWORK_NAME_DESCRIPTOR:
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = args->desc->length;
+ psi_args.from = ((struct vidtv_psi_desc_network_name *)args->desc)->network_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+ break;
+
+ case SERVICE_LIST_DESCRIPTOR:
+ serv_list_entry = ((struct vidtv_psi_desc_service_list *)args->desc)->service_list;
+ while (serv_list_entry) {
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = sizeof(struct vidtv_psi_desc_service_list_entry) -
+ sizeof(struct vidtv_psi_desc_service_list_entry *);
+ psi_args.from = serv_list_entry;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ serv_list_entry = serv_list_entry->next;
+ }
+ break;
+
+ case SHORT_EVENT_DESCRIPTOR:
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ISO_LANGUAGE_CODE_LEN;
+ psi_args.from = ((struct vidtv_psi_desc_short_event *)
+ args->desc)->iso_language_code;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = sizeof_field(struct vidtv_psi_desc_short_event, event_name_len);
+ psi_args.from = &((struct vidtv_psi_desc_short_event *)
+ args->desc)->event_name_len;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_short_event *)args->desc)->event_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_short_event *)args->desc)->event_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = sizeof_field(struct vidtv_psi_desc_short_event, text_len);
+ psi_args.from = &((struct vidtv_psi_desc_short_event *)args->desc)->text_len;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_short_event *)args->desc)->text_len;
+ psi_args.from = ((struct vidtv_psi_desc_short_event *)args->desc)->text;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
break;
case REGISTRATION_DESCRIPTOR:
default:
- psi_args.dest_offset = args.dest_offset + nbytes;
- psi_args.len = args.desc->length;
- psi_args.from = &args.desc->data;
+ psi_args.dest_offset = args->dest_offset + nbytes;
+ psi_args.len = args->desc->length;
+ psi_args.from = &args->desc->data;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
break;
}
@@ -577,40 +754,37 @@
}
static u32
-vidtv_psi_table_header_write_into(struct header_write_args args)
+vidtv_psi_table_header_write_into(struct header_write_args *args)
{
- /* the number of bytes written by this function */
- u32 nbytes = 0;
- struct psi_write_args psi_args = {};
+ struct psi_write_args psi_args = {
+ .dest_buf = args->dest_buf,
+ .from = args->h,
+ .len = sizeof(struct vidtv_psi_table_header),
+ .dest_offset = args->dest_offset,
+ .pid = args->pid,
+ .new_psi_section = true,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = false,
+ .dest_buf_sz = args->dest_buf_sz,
+ .crc = args->crc,
+ };
- psi_args.dest_buf = args.dest_buf;
- psi_args.from = args.h;
- psi_args.len = sizeof(struct vidtv_psi_table_header);
- psi_args.dest_offset = args.dest_offset;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = true;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.dest_buf_sz;
- psi_args.crc = args.crc;
-
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
-
- return nbytes;
+ return vidtv_psi_ts_psi_write_into(&psi_args);
}
void
vidtv_psi_pat_table_update_sec_len(struct vidtv_psi_table_pat *pat)
{
- /* see ISO/IEC 13818-1 : 2000 p.43 */
u16 length = 0;
u32 i;
+ /* see ISO/IEC 13818-1 : 2000 p.43 */
+
/* from immediately after 'section_length' until 'last_section_number'*/
length += PAT_LEN_UNTIL_LAST_SECTION_NUMBER;
/* do not count the pointer */
- for (i = 0; i < pat->programs; ++i)
+ for (i = 0; i < pat->num_pat; ++i)
length += sizeof(struct vidtv_psi_table_pat_program) -
sizeof(struct vidtv_psi_table_pat_program *);
@@ -621,10 +795,11 @@
void vidtv_psi_pmt_table_update_sec_len(struct vidtv_psi_table_pmt *pmt)
{
- /* see ISO/IEC 13818-1 : 2000 p.46 */
- u16 length = 0;
struct vidtv_psi_table_pmt_stream *s = pmt->stream;
u16 desc_loop_len;
+ u16 length = 0;
+
+ /* see ISO/IEC 13818-1 : 2000 p.46 */
/* from immediately after 'section_length' until 'program_info_length'*/
length += PMT_LEN_UNTIL_PROGRAM_INFO_LENGTH;
@@ -655,10 +830,11 @@
void vidtv_psi_sdt_table_update_sec_len(struct vidtv_psi_table_sdt *sdt)
{
- /* see ETSI EN 300 468 V 1.10.1 p.24 */
- u16 length = 0;
struct vidtv_psi_table_sdt_service *s = sdt->service;
u16 desc_loop_len;
+ u16 length = 0;
+
+ /* see ETSI EN 300 468 V 1.10.1 p.24 */
/*
* from immediately after 'section_length' until
@@ -681,7 +857,6 @@
}
length += CRC_SIZE_IN_BYTES;
-
vidtv_psi_set_sec_len(&sdt->header, length);
}
@@ -694,6 +869,8 @@
const u16 RESERVED = 0x07;
program = kzalloc(sizeof(*program), GFP_KERNEL);
+ if (!program)
+ return NULL;
program->service_id = cpu_to_be16(service_id);
@@ -714,8 +891,8 @@
void
vidtv_psi_pat_program_destroy(struct vidtv_psi_table_pat_program *p)
{
- struct vidtv_psi_table_pat_program *curr = p;
struct vidtv_psi_table_pat_program *tmp = NULL;
+ struct vidtv_psi_table_pat_program *curr = p;
while (curr) {
tmp = curr;
@@ -724,42 +901,49 @@
}
}
+/* This function transfers ownership of p to the table */
void
vidtv_psi_pat_program_assign(struct vidtv_psi_table_pat *pat,
struct vidtv_psi_table_pat_program *p)
{
- /* This function transfers ownership of p to the table */
+ struct vidtv_psi_table_pat_program *program;
+ u16 program_count;
- u16 program_count = 0;
- struct vidtv_psi_table_pat_program *program = p;
+ do {
+ program_count = 0;
+ program = p;
- if (p == pat->program)
- return;
+ if (p == pat->program)
+ return;
- while (program) {
- ++program_count;
- program = program->next;
- }
+ while (program) {
+ ++program_count;
+ program = program->next;
+ }
- pat->programs = program_count;
- pat->program = p;
+ pat->num_pat = program_count;
+ pat->program = p;
- /* Recompute section length */
- vidtv_psi_pat_table_update_sec_len(pat);
+ /* Recompute section length */
+ vidtv_psi_pat_table_update_sec_len(pat);
- if (vidtv_psi_get_sec_len(&pat->header) > MAX_SECTION_LEN)
- vidtv_psi_pat_program_assign(pat, NULL);
+ p = NULL;
+ } while (vidtv_psi_get_sec_len(&pat->header) > MAX_SECTION_LEN);
vidtv_psi_update_version_num(&pat->header);
}
struct vidtv_psi_table_pat *vidtv_psi_pat_table_init(u16 transport_stream_id)
{
- struct vidtv_psi_table_pat *pat = kzalloc(sizeof(*pat), GFP_KERNEL);
+ struct vidtv_psi_table_pat *pat;
const u16 SYNTAX = 0x1;
const u16 ZERO = 0x0;
const u16 ONES = 0x03;
+ pat = kzalloc(sizeof(*pat), GFP_KERNEL);
+ if (!pat)
+ return NULL;
+
pat->header.table_id = 0x0;
pat->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ZERO << 14) | (ONES << 12));
@@ -772,70 +956,68 @@
pat->header.section_id = 0x0;
pat->header.last_section = 0x0;
- pat->programs = 0;
-
vidtv_psi_pat_table_update_sec_len(pat);
return pat;
}
-u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args args)
+u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args *args)
{
- /* the number of bytes written by this function */
+ struct vidtv_psi_table_pat_program *p = args->pat->program;
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .pid = VIDTV_PAT_PID,
+ .h = &args->pat->header,
+ .continuity_counter = args->continuity_counter,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_PAT_PID,
+ .new_psi_section = false,
+ .continuity_counter = args->continuity_counter,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_PAT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ u32 crc = INITIAL_CRC;
u32 nbytes = 0;
- const u16 pat_pid = VIDTV_PAT_PID;
- u32 crc = 0xffffffff;
- struct vidtv_psi_table_pat_program *p = args.pat->program;
+ vidtv_psi_pat_table_update_sec_len(args->pat);
- struct header_write_args h_args = {};
- struct psi_write_args psi_args = {};
- struct crc32_write_args c_args = {};
-
- vidtv_psi_pat_table_update_sec_len(args.pat);
-
- h_args.dest_buf = args.buf;
- h_args.dest_offset = args.offset;
- h_args.h = &args.pat->header;
- h_args.pid = pat_pid;
- h_args.continuity_counter = args.continuity_counter;
- h_args.dest_buf_sz = args.buf_sz;
h_args.crc = &crc;
- nbytes += vidtv_psi_table_header_write_into(h_args);
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
/* note that the field 'u16 programs' is not really part of the PAT */
- psi_args.dest_buf = args.buf;
- psi_args.pid = pat_pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.buf_sz;
- psi_args.crc = &crc;
+ psi_args.crc = &crc;
while (p) {
/* copy the PAT programs */
psi_args.from = p;
/* skip the pointer */
psi_args.len = sizeof(*p) -
- sizeof(struct vidtv_psi_table_pat_program *);
- psi_args.dest_offset = args.offset + nbytes;
+ sizeof(struct vidtv_psi_table_pat_program *);
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
p = p->next;
}
- c_args.dest_buf = args.buf;
- c_args.dest_offset = args.offset + nbytes;
+ c_args.dest_offset = args->offset + nbytes;
+ c_args.continuity_counter = args->continuity_counter;
c_args.crc = cpu_to_be32(crc);
- c_args.pid = pat_pid;
- c_args.continuity_counter = args.continuity_counter;
- c_args.dest_buf_sz = args.buf_sz;
/* Write the CRC32 at the end */
- nbytes += table_section_crc32_write_into(c_args);
+ nbytes += table_section_crc32_write_into(&c_args);
return nbytes;
}
@@ -859,6 +1041,8 @@
u16 desc_loop_len;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return NULL;
stream->type = stream_type;
@@ -883,8 +1067,8 @@
void vidtv_psi_pmt_stream_destroy(struct vidtv_psi_table_pmt_stream *s)
{
- struct vidtv_psi_table_pmt_stream *curr_stream = s;
struct vidtv_psi_table_pmt_stream *tmp_stream = NULL;
+ struct vidtv_psi_table_pmt_stream *curr_stream = s;
while (curr_stream) {
tmp_stream = curr_stream;
@@ -897,15 +1081,16 @@
void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
struct vidtv_psi_table_pmt_stream *s)
{
- /* This function transfers ownership of s to the table */
- if (s == pmt->stream)
- return;
+ do {
+ /* This function transfers ownership of s to the table */
+ if (s == pmt->stream)
+ return;
- pmt->stream = s;
- vidtv_psi_pmt_table_update_sec_len(pmt);
+ pmt->stream = s;
+ vidtv_psi_pmt_table_update_sec_len(pmt);
- if (vidtv_psi_get_sec_len(&pmt->header) > MAX_SECTION_LEN)
- vidtv_psi_pmt_stream_assign(pmt, NULL);
+ s = NULL;
+ } while (vidtv_psi_get_sec_len(&pmt->header) > MAX_SECTION_LEN);
vidtv_psi_update_version_num(&pmt->header);
}
@@ -933,14 +1118,18 @@
struct vidtv_psi_table_pmt *vidtv_psi_pmt_table_init(u16 program_number,
u16 pcr_pid)
{
- struct vidtv_psi_table_pmt *pmt = kzalloc(sizeof(*pmt), GFP_KERNEL);
- const u16 SYNTAX = 0x1;
- const u16 ZERO = 0x0;
- const u16 ONES = 0x03;
+ struct vidtv_psi_table_pmt *pmt;
const u16 RESERVED1 = 0x07;
const u16 RESERVED2 = 0x0f;
+ const u16 SYNTAX = 0x1;
+ const u16 ONES = 0x03;
+ const u16 ZERO = 0x0;
u16 desc_loop_len;
+ pmt = kzalloc(sizeof(*pmt), GFP_KERNEL);
+ if (!pmt)
+ return NULL;
+
if (!pcr_pid)
pcr_pid = 0x1fff;
@@ -970,87 +1159,84 @@
return pmt;
}
-u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args args)
+u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
{
- /* the number of bytes written by this function */
+ struct vidtv_psi_desc *table_descriptor = args->pmt->descriptor;
+ struct vidtv_psi_table_pmt_stream *stream = args->pmt->stream;
+ struct vidtv_psi_desc *stream_descriptor;
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->pmt->header,
+ .pid = args->pid,
+ .continuity_counter = args->continuity_counter,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .from = &args->pmt->bitfield,
+ .len = sizeof_field(struct vidtv_psi_table_pmt, bitfield) +
+ sizeof_field(struct vidtv_psi_table_pmt, bitfield2),
+ .pid = args->pid,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .desc = table_descriptor,
+ .pid = args->pid,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = args->pid,
+ .dest_buf_sz = args->buf_sz,
+ };
+ u32 crc = INITIAL_CRC;
u32 nbytes = 0;
- u32 crc = 0xffffffff;
- struct vidtv_psi_desc *table_descriptor = args.pmt->descriptor;
- struct vidtv_psi_table_pmt_stream *stream = args.pmt->stream;
- struct vidtv_psi_desc *stream_descriptor = (stream) ?
- args.pmt->stream->descriptor :
- NULL;
+ vidtv_psi_pmt_table_update_sec_len(args->pmt);
- struct header_write_args h_args = {};
- struct psi_write_args psi_args = {};
- struct desc_write_args d_args = {};
- struct crc32_write_args c_args = {};
-
- vidtv_psi_pmt_table_update_sec_len(args.pmt);
-
- h_args.dest_buf = args.buf;
- h_args.dest_offset = args.offset;
- h_args.h = &args.pmt->header;
- h_args.pid = args.pid;
- h_args.continuity_counter = args.continuity_counter;
- h_args.dest_buf_sz = args.buf_sz;
h_args.crc = &crc;
- nbytes += vidtv_psi_table_header_write_into(h_args);
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
/* write the two bitfields */
- psi_args.dest_buf = args.buf;
- psi_args.from = &args.pmt->bitfield;
- psi_args.len = sizeof_field(struct vidtv_psi_table_pmt, bitfield) +
- sizeof_field(struct vidtv_psi_table_pmt, bitfield2);
-
- psi_args.dest_offset = args.offset + nbytes;
- psi_args.pid = args.pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.buf_sz;
- psi_args.crc = &crc;
-
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
while (table_descriptor) {
/* write the descriptors, if any */
- d_args.dest_buf = args.buf;
- d_args.dest_offset = args.offset + nbytes;
- d_args.desc = table_descriptor;
- d_args.pid = args.pid;
- d_args.continuity_counter = args.continuity_counter;
- d_args.dest_buf_sz = args.buf_sz;
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.continuity_counter = args->continuity_counter;
d_args.crc = &crc;
- nbytes += vidtv_psi_desc_write_into(d_args);
+ nbytes += vidtv_psi_desc_write_into(&d_args);
table_descriptor = table_descriptor->next;
}
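+ /*
+ * psi_args.len still holds the size of the two PMT bitfields, which is
+ * the same size as the per-stream bitfield pair; adding 'type' gives the
+ * fixed part of each stream entry (type + bitfield + bitfield2).
+ */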
+ psi_args.len += sizeof_field(struct vidtv_psi_table_pmt_stream, type);
while (stream) {
/* write the streams, if any */
psi_args.from = stream;
- psi_args.len = sizeof_field(struct vidtv_psi_table_pmt_stream, type) +
- sizeof_field(struct vidtv_psi_table_pmt_stream, bitfield) +
- sizeof_field(struct vidtv_psi_table_pmt_stream, bitfield2);
- psi_args.dest_offset = args.offset + nbytes;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ stream_descriptor = stream->descriptor;
while (stream_descriptor) {
/* write the stream descriptors, if any */
- d_args.dest_buf = args.buf;
- d_args.dest_offset = args.offset + nbytes;
+ d_args.dest_offset = args->offset + nbytes;
d_args.desc = stream_descriptor;
- d_args.pid = args.pid;
- d_args.continuity_counter = args.continuity_counter;
- d_args.dest_buf_sz = args.buf_sz;
+ d_args.continuity_counter = args->continuity_counter;
d_args.crc = &crc;
- nbytes += vidtv_psi_desc_write_into(d_args);
+ nbytes += vidtv_psi_desc_write_into(&d_args);
stream_descriptor = stream_descriptor->next;
}
@@ -1058,15 +1244,12 @@
stream = stream->next;
}
- c_args.dest_buf = args.buf;
- c_args.dest_offset = args.offset + nbytes;
+ c_args.dest_offset = args->offset + nbytes;
c_args.crc = cpu_to_be32(crc);
- c_args.pid = args.pid;
- c_args.continuity_counter = args.continuity_counter;
- c_args.dest_buf_sz = args.buf_sz;
+ c_args.continuity_counter = args->continuity_counter;
/* Write the CRC32 at the end */
- nbytes += table_section_crc32_write_into(c_args);
+ nbytes += table_section_crc32_write_into(&c_args);
return nbytes;
}
@@ -1078,16 +1261,20 @@
kfree(pmt);
}
-struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id)
+struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 network_id,
+ u16 transport_stream_id)
{
- struct vidtv_psi_table_sdt *sdt = kzalloc(sizeof(*sdt), GFP_KERNEL);
- const u16 SYNTAX = 0x1;
- const u16 ONE = 0x1;
- const u16 ONES = 0x03;
+ struct vidtv_psi_table_sdt *sdt;
const u16 RESERVED = 0xff;
+ const u16 SYNTAX = 0x1;
+ const u16 ONES = 0x03;
+ const u16 ONE = 0x1;
+
+ sdt = kzalloc(sizeof(*sdt), GFP_KERNEL);
+ if (!sdt)
+ return NULL;
sdt->header.table_id = 0x42;
-
sdt->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
/*
@@ -1111,7 +1298,7 @@
* This can be changed to something more useful, when support for
* NIT gets added
*/
- sdt->network_id = cpu_to_be16(0xff01);
+ sdt->network_id = cpu_to_be16(network_id);
sdt->reserved = RESERVED;
vidtv_psi_sdt_table_update_sec_len(sdt);
@@ -1119,74 +1306,79 @@
return sdt;
}
-u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args args)
+u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args *args)
{
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->sdt->header,
+ .pid = VIDTV_SDT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .len = sizeof_field(struct vidtv_psi_table_sdt, network_id) +
+ sizeof_field(struct vidtv_psi_table_sdt, reserved),
+ .pid = VIDTV_SDT_PID,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_SDT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_SDT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct vidtv_psi_table_sdt_service *service = args->sdt->service;
+ struct vidtv_psi_desc *service_desc;
u32 nbytes = 0;
- u16 sdt_pid = VIDTV_SDT_PID; /* see ETSI EN 300 468 v1.15.1 p. 11 */
+ u32 crc = INITIAL_CRC;
- u32 crc = 0xffffffff;
+ /* see ETSI EN 300 468 v1.15.1 p. 11 */
- struct vidtv_psi_table_sdt_service *service = args.sdt->service;
- struct vidtv_psi_desc *service_desc = (args.sdt->service) ?
- args.sdt->service->descriptor :
- NULL;
+ vidtv_psi_sdt_table_update_sec_len(args->sdt);
- struct header_write_args h_args = {};
- struct psi_write_args psi_args = {};
- struct desc_write_args d_args = {};
- struct crc32_write_args c_args = {};
-
- vidtv_psi_sdt_table_update_sec_len(args.sdt);
-
- h_args.dest_buf = args.buf;
- h_args.dest_offset = args.offset;
- h_args.h = &args.sdt->header;
- h_args.pid = sdt_pid;
- h_args.continuity_counter = args.continuity_counter;
- h_args.dest_buf_sz = args.buf_sz;
+ h_args.continuity_counter = args->continuity_counter;
h_args.crc = &crc;
- nbytes += vidtv_psi_table_header_write_into(h_args);
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
- psi_args.dest_buf = args.buf;
- psi_args.from = &args.sdt->network_id;
-
- psi_args.len = sizeof_field(struct vidtv_psi_table_sdt, network_id) +
- sizeof_field(struct vidtv_psi_table_sdt, reserved);
-
- psi_args.dest_offset = args.offset + nbytes;
- psi_args.pid = sdt_pid;
- psi_args.new_psi_section = false;
- psi_args.continuity_counter = args.continuity_counter;
- psi_args.is_crc = false;
- psi_args.dest_buf_sz = args.buf_sz;
+ psi_args.from = &args->sdt->network_id;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
psi_args.crc = &crc;
/* copy u16 network_id + u8 reserved */
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ /* skip both pointers at the end */
+ psi_args.len = sizeof(struct vidtv_psi_table_sdt_service) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_sdt_service *);
while (service) {
/* copy the services, if any */
psi_args.from = service;
- /* skip both pointers at the end */
- psi_args.len = sizeof(struct vidtv_psi_table_sdt_service) -
- sizeof(struct vidtv_psi_desc *) -
- sizeof(struct vidtv_psi_table_sdt_service *);
- psi_args.dest_offset = args.offset + nbytes;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
- nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ service_desc = service->descriptor;
while (service_desc) {
/* copy the service descriptors, if any */
- d_args.dest_buf = args.buf;
- d_args.dest_offset = args.offset + nbytes;
+ d_args.dest_offset = args->offset + nbytes;
d_args.desc = service_desc;
- d_args.pid = sdt_pid;
- d_args.continuity_counter = args.continuity_counter;
- d_args.dest_buf_sz = args.buf_sz;
+ d_args.continuity_counter = args->continuity_counter;
d_args.crc = &crc;
- nbytes += vidtv_psi_desc_write_into(d_args);
+ nbytes += vidtv_psi_desc_write_into(&d_args);
service_desc = service_desc->next;
}
@@ -1194,15 +1386,12 @@
service = service->next;
}
- c_args.dest_buf = args.buf;
- c_args.dest_offset = args.offset + nbytes;
+ c_args.dest_offset = args->offset + nbytes;
c_args.crc = cpu_to_be32(crc);
- c_args.pid = sdt_pid;
- c_args.continuity_counter = args.continuity_counter;
- c_args.dest_buf_sz = args.buf_sz;
+ c_args.continuity_counter = args->continuity_counter;
/* Write the CRC at the end */
- nbytes += table_section_crc32_write_into(c_args);
+ nbytes += table_section_crc32_write_into(&c_args);
return nbytes;
}
@@ -1215,11 +1404,15 @@
struct vidtv_psi_table_sdt_service
*vidtv_psi_sdt_service_init(struct vidtv_psi_table_sdt_service *head,
- u16 service_id)
+ u16 service_id,
+ bool eit_schedule,
+ bool eit_present_following)
{
struct vidtv_psi_table_sdt_service *service;
service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service)
+ return NULL;
/*
* ETSI 300 468: this is a 16bit field which serves as a label to
@@ -1228,8 +1421,8 @@
* corresponding program_map_section
*/
service->service_id = cpu_to_be16(service_id);
- service->EIT_schedule = 0x0;
- service->EIT_present_following = 0x0;
+ service->EIT_schedule = eit_schedule;
+ service->EIT_present_following = eit_present_following;
service->reserved = 0x3f;
service->bitfield = cpu_to_be16(RUNNING << 13);
@@ -1262,53 +1455,78 @@
vidtv_psi_sdt_service_assign(struct vidtv_psi_table_sdt *sdt,
struct vidtv_psi_table_sdt_service *service)
{
- if (service == sdt->service)
- return;
+ do {
+ if (service == sdt->service)
+ return;
- sdt->service = service;
+ sdt->service = service;
- /* recompute section length */
- vidtv_psi_sdt_table_update_sec_len(sdt);
+ /* recompute section length */
+ vidtv_psi_sdt_table_update_sec_len(sdt);
- if (vidtv_psi_get_sec_len(&sdt->header) > MAX_SECTION_LEN)
- vidtv_psi_sdt_service_assign(sdt, NULL);
+ service = NULL;
+ } while (vidtv_psi_get_sec_len(&sdt->header) > MAX_SECTION_LEN);
vidtv_psi_update_version_num(&sdt->header);
}
+/*
+ * PMTs contain information about programs. For each program,
+ * there is one PMT section. This function will create a section
+ * for each program found in the PAT
+ */
struct vidtv_psi_table_pmt**
-vidtv_psi_pmt_create_sec_for_each_pat_entry(struct vidtv_psi_table_pat *pat, u16 pcr_pid)
+vidtv_psi_pmt_create_sec_for_each_pat_entry(struct vidtv_psi_table_pat *pat,
+ u16 pcr_pid)
{
- /*
- * PMTs contain information about programs. For each program,
- * there is one PMT section. This function will create a section
- * for each program found in the PAT
- */
- struct vidtv_psi_table_pat_program *program = pat->program;
+ struct vidtv_psi_table_pat_program *program;
struct vidtv_psi_table_pmt **pmt_secs;
- u32 i = 0;
+ u32 i = 0, num_pmt = 0;
- /* a section for each program_id */
- pmt_secs = kcalloc(pat->programs,
- sizeof(struct vidtv_psi_table_pmt *),
- GFP_KERNEL);
-
+ /*
+ * The number of PMT entries is the number of PAT entries
+ * that contain a service_id. This excludes special tables, like the NIT.
+ */
+ program = pat->program;
while (program) {
- pmt_secs[i] = vidtv_psi_pmt_table_init(be16_to_cpu(program->service_id), pcr_pid);
- ++i;
+ if (program->service_id)
+ num_pmt++;
program = program->next;
}
+ pmt_secs = kcalloc(num_pmt,
+ sizeof(struct vidtv_psi_table_pmt *),
+ GFP_KERNEL);
+ if (!pmt_secs)
+ return NULL;
+
+ for (program = pat->program; program; program = program->next) {
+ if (!program->service_id)
+ continue;
+ pmt_secs[i] = vidtv_psi_pmt_table_init(be16_to_cpu(program->service_id),
+ pcr_pid);
+
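+ /* on allocation failure, free the PMT sections already created */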
+ if (!pmt_secs[i]) {
+ while (i > 0) {
+ i--;
+ vidtv_psi_pmt_table_destroy(pmt_secs[i]);
+ }
+ return NULL;
+ }
+ i++;
+ }
+ pat->num_pmt = num_pmt;
+
return pmt_secs;
}
+/* find the PMT section associated with 'program_num' */
struct vidtv_psi_table_pmt
*vidtv_psi_find_pmt_sec(struct vidtv_psi_table_pmt **pmt_sections,
u16 nsections,
u16 program_num)
{
- /* find the PMT section associated with 'program_num' */
struct vidtv_psi_table_pmt *sec = NULL;
u32 i;
@@ -1320,3 +1538,488 @@
return NULL; /* not found */
}
+
+static void vidtv_psi_nit_table_update_sec_len(struct vidtv_psi_table_nit *nit)
+{
+ u16 length = 0;
+ struct vidtv_psi_table_transport *t = nit->transport;
+ u16 desc_loop_len;
+ u16 transport_loop_len = 0;
+
+ /*
+ * from immediately after 'section_length' until
+ * 'network_descriptor_length'
+ */
+ length += NIT_LEN_UNTIL_NETWORK_DESCRIPTOR_LEN;
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(nit->descriptor);
+ vidtv_psi_set_desc_loop_len(&nit->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ length += sizeof_field(struct vidtv_psi_table_nit, bitfield2);
+
+ while (t) {
+ /* skip both pointers at the end */
+ transport_loop_len += sizeof(struct vidtv_psi_table_transport) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_transport *);
+
+ length += transport_loop_len;
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(t->descriptor);
+ vidtv_psi_set_desc_loop_len(&t->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ t = t->next;
+ }
+
+ // Actually sets the transport stream loop len, maybe rename this function later
+ vidtv_psi_set_desc_loop_len(&nit->bitfield2, transport_loop_len, 12);
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&nit->header, length);
+}
+
+struct vidtv_psi_table_nit
+*vidtv_psi_nit_table_init(u16 network_id,
+ u16 transport_stream_id,
+ char *network_name,
+ struct vidtv_psi_desc_service_list_entry *service_list)
+{
+ struct vidtv_psi_table_transport *transport;
+ struct vidtv_psi_table_nit *nit;
+ const u16 SYNTAX = 0x1;
+ const u16 ONES = 0x03;
+ const u16 ONE = 0x1;
+
+ nit = kzalloc(sizeof(*nit), GFP_KERNEL);
+ if (!nit)
+ return NULL;
+
+ transport = kzalloc(sizeof(*transport), GFP_KERNEL);
+ if (!transport)
+ goto free_nit;
+
+ nit->header.table_id = 0x40; // ACTUAL_NETWORK
+
+ nit->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
+
+ nit->header.id = cpu_to_be16(network_id);
+ nit->header.current_next = ONE;
+
+ nit->header.version = 0x1f;
+
+ nit->header.one2 = ONES;
+ nit->header.section_id = 0;
+ nit->header.last_section = 0;
+
+ nit->bitfield = cpu_to_be16(0xf);
+ nit->bitfield2 = cpu_to_be16(0xf);
+
+ nit->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_network_name_desc_init(NULL, network_name);
+ if (!nit->descriptor)
+ goto free_transport;
+
+ transport->transport_id = cpu_to_be16(transport_stream_id);
+ transport->network_id = cpu_to_be16(network_id);
+ transport->bitfield = cpu_to_be16(0xf);
+ transport->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_service_list_desc_init(NULL, service_list);
+ if (!transport->descriptor)
+ goto free_nit_desc;
+
+ nit->transport = transport;
+
+ vidtv_psi_nit_table_update_sec_len(nit);
+
+ return nit;
+
+free_nit_desc:
+ vidtv_psi_desc_destroy((struct vidtv_psi_desc *)nit->descriptor);
+
+free_transport:
+ kfree(transport);
+free_nit:
+ kfree(nit);
+ return NULL;
+}
+
+u32 vidtv_psi_nit_write_into(struct vidtv_psi_nit_write_args *args)
+{
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->nit->header,
+ .pid = VIDTV_NIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .from = &args->nit->bitfield,
+ .len = sizeof_field(struct vidtv_psi_table_nit, bitfield),
+ .pid = VIDTV_NIT_PID,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_NIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_NIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct vidtv_psi_desc *table_descriptor = args->nit->descriptor;
+ struct vidtv_psi_table_transport *transport = args->nit->transport;
+ struct vidtv_psi_desc *transport_descriptor;
+ u32 crc = INITIAL_CRC;
+ u32 nbytes = 0;
+
+ vidtv_psi_nit_table_update_sec_len(args->nit);
+
+ h_args.continuity_counter = args->continuity_counter;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
+
+ /* write the bitfield */
+
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+ psi_args.crc = &crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ while (table_descriptor) {
+ /* write the descriptors, if any */
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.desc = table_descriptor;
+ d_args.continuity_counter = args->continuity_counter;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(&d_args);
+
+ table_descriptor = table_descriptor->next;
+ }
+
+ /* write the second bitfield */
+ psi_args.from = &args->nit->bitfield2;
+ psi_args.len = sizeof_field(struct vidtv_psi_table_nit, bitfield2);
+ psi_args.dest_offset = args->offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
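+ /* fixed part of each transport entry: transport_id, network_id and bitfield */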
+ psi_args.len = sizeof_field(struct vidtv_psi_table_transport, transport_id) +
+ sizeof_field(struct vidtv_psi_table_transport, network_id) +
+ sizeof_field(struct vidtv_psi_table_transport, bitfield);
+ while (transport) {
+ /* write the transport sections, if any */
+ psi_args.from = transport;
+ psi_args.dest_offset = args->offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ transport_descriptor = transport->descriptor;
+
+ while (transport_descriptor) {
+ /* write the transport descriptors, if any */
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.desc = transport_descriptor;
+ d_args.continuity_counter = args->continuity_counter;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(&d_args);
+
+ transport_descriptor = transport_descriptor->next;
+ }
+
+ transport = transport->next;
+ }
+
+ c_args.dest_offset = args->offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.continuity_counter = args->continuity_counter;
+
+ /* Write the CRC32 at the end */
+ nbytes += table_section_crc32_write_into(&c_args);
+
+ return nbytes;
+}
+
+static void vidtv_psi_transport_destroy(struct vidtv_psi_table_transport *t)
+{
+ struct vidtv_psi_table_transport *tmp_t = NULL;
+ struct vidtv_psi_table_transport *curr_t = t;
+
+ while (curr_t) {
+ tmp_t = curr_t;
+ curr_t = curr_t->next;
+ vidtv_psi_desc_destroy(tmp_t->descriptor);
+ kfree(tmp_t);
+ }
+}
+
+void vidtv_psi_nit_table_destroy(struct vidtv_psi_table_nit *nit)
+{
+ vidtv_psi_desc_destroy(nit->descriptor);
+ vidtv_psi_transport_destroy(nit->transport);
+ kfree(nit);
+}
+
+void vidtv_psi_eit_table_update_sec_len(struct vidtv_psi_table_eit *eit)
+{
+ struct vidtv_psi_table_eit_event *e = eit->event;
+ u16 desc_loop_len;
+ u16 length = 0;
+
+ /*
+ * from immediately after 'section_length' until
+ * 'last_table_id'
+ */
+ length += EIT_LEN_UNTIL_LAST_TABLE_ID;
+
+ while (e) {
+ /* skip both pointers at the end */
+ length += sizeof(struct vidtv_psi_table_eit_event) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_eit_event *);
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(e->descriptor);
+ vidtv_psi_set_desc_loop_len(&e->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ e = e->next;
+ }
+
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&eit->header, length);
+}
+
+void vidtv_psi_eit_event_assign(struct vidtv_psi_table_eit *eit,
+ struct vidtv_psi_table_eit_event *e)
+{
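+ /*
+ * Transfer ownership of the event list to the table. If the section
+ * length would exceed EIT_MAX_SECTION_LEN, the list is dropped and the
+ * table is left with no events.
+ */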
+ do {
+ if (e == eit->event)
+ return;
+
+ eit->event = e;
+ vidtv_psi_eit_table_update_sec_len(eit);
+
+ e = NULL;
+ } while (vidtv_psi_get_sec_len(&eit->header) > EIT_MAX_SECTION_LEN);
+
+ vidtv_psi_update_version_num(&eit->header);
+}
+
+struct vidtv_psi_table_eit
+*vidtv_psi_eit_table_init(u16 network_id,
+ u16 transport_stream_id,
+ __be16 service_id)
+{
+ struct vidtv_psi_table_eit *eit;
+ const u16 SYNTAX = 0x1;
+ const u16 ONE = 0x1;
+ const u16 ONES = 0x03;
+
+ eit = kzalloc(sizeof(*eit), GFP_KERNEL);
+ if (!eit)
+ return NULL;
+
+ eit->header.table_id = 0x4e; // actual_transport_stream: present/following
+
+ eit->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
+
+ eit->header.id = service_id;
+ eit->header.current_next = ONE;
+
+ eit->header.version = 0x1f;
+
+ eit->header.one2 = ONES;
+ eit->header.section_id = 0;
+ eit->header.last_section = 0;
+
+ eit->transport_id = cpu_to_be16(transport_stream_id);
+ eit->network_id = cpu_to_be16(network_id);
+
+ eit->last_segment = eit->header.last_section; /* not implemented */
+ eit->last_table_id = eit->header.table_id; /* not implemented */
+
+ vidtv_psi_eit_table_update_sec_len(eit);
+
+ return eit;
+}
+
+u32 vidtv_psi_eit_write_into(struct vidtv_psi_eit_write_args *args)
+{
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+ .h = &args->eit->header,
+ .pid = VIDTV_EIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct psi_write_args psi_args = {
+ .dest_buf = args->buf,
+ .len = sizeof_field(struct vidtv_psi_table_eit, transport_id) +
+ sizeof_field(struct vidtv_psi_table_eit, network_id) +
+ sizeof_field(struct vidtv_psi_table_eit, last_segment) +
+ sizeof_field(struct vidtv_psi_table_eit, last_table_id),
+ .pid = VIDTV_EIT_PID,
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_EIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct crc32_write_args c_args = {
+ .dest_buf = args->buf,
+ .pid = VIDTV_EIT_PID,
+ .dest_buf_sz = args->buf_sz,
+ };
+ struct vidtv_psi_table_eit_event *event = args->eit->event;
+ struct vidtv_psi_desc *event_descriptor;
+ u32 crc = INITIAL_CRC;
+ u32 nbytes = 0;
+
+ vidtv_psi_eit_table_update_sec_len(args->eit);
+
+ h_args.continuity_counter = args->continuity_counter;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(&h_args);
+
+ psi_args.from = &args->eit->transport_id;
+ psi_args.dest_offset = args->offset + nbytes;
+ psi_args.continuity_counter = args->continuity_counter;
+ psi_args.crc = &crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ /* skip both pointers at the end */
+ psi_args.len = sizeof(struct vidtv_psi_table_eit_event) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_eit_event *);
+ while (event) {
+ /* copy the events, if any */
+ psi_args.from = event;
+ psi_args.dest_offset = args->offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(&psi_args);
+
+ event_descriptor = event->descriptor;
+
+ while (event_descriptor) {
+ /* copy the event descriptors, if any */
+ d_args.dest_offset = args->offset + nbytes;
+ d_args.desc = event_descriptor;
+ d_args.continuity_counter = args->continuity_counter;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(&d_args);
+
+ event_descriptor = event_descriptor->next;
+ }
+
+ event = event->next;
+ }
+
+ c_args.dest_offset = args->offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.continuity_counter = args->continuity_counter;
+
+ /* Write the CRC at the end */
+ nbytes += table_section_crc32_write_into(&c_args);
+
+ return nbytes;
+}
+
+struct vidtv_psi_table_eit_event
+*vidtv_psi_eit_event_init(struct vidtv_psi_table_eit_event *head, u16 event_id)
+{
+ const u8 DURATION[] = {0x23, 0x59, 0x59}; /* BCD encoded */
+ struct vidtv_psi_table_eit_event *e;
+ struct timespec64 ts;
+ struct tm time;
+ int mjd, l;
+ __be16 mjd_be;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ e->event_id = cpu_to_be16(event_id);
+
+ ts = ktime_to_timespec64(ktime_get_real());
+ time64_to_tm(ts.tv_sec, 0, &time);
+
+ /* Convert date to Modified Julian Date - per EN 300 468 Annex C */
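+ /*
+ * MJD = 14956 + D + int((Y - L) * 365.25) + int((M + 1 + L * 12) * 30.6001),
+ * where Y is the number of years since 1900, M is the month (1..12),
+ * D is the day of the month, and L is 1 for January/February and 0 otherwise.
+ */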
+ if (time.tm_mon < 2)
+ l = 1;
+ else
+ l = 0;
+
+ mjd = 14956 + time.tm_mday;
+ mjd += (time.tm_year - l) * 36525 / 100;
+ mjd += (time.tm_mon + 2 + l * 12) * 306001 / 10000;
+ mjd_be = cpu_to_be16(mjd);
+
+ /*
+ * Store MJD and hour/min/sec to the event.
+ *
+ * Let's make the event start on a full hour.
+ */
+ memcpy(e->start_time, &mjd_be, sizeof(mjd_be));
+ e->start_time[2] = bin2bcd(time.tm_hour);
+ e->start_time[3] = 0;
+ e->start_time[4] = 0;
+
+ /*
+ * TODO: for now, the event will last for a day. That should be
+ * enough for testing purposes, but if one runs the driver for
+ * longer than that, the current event will become invalid. So,
+ * we need better code here to change the start time once the
+ * event expires.
+ */
+ memcpy(e->duration, DURATION, sizeof(e->duration));
+
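+ /*
+ * running_status = RUNNING; the descriptors_loop_length bits are filled
+ * in by vidtv_psi_eit_table_update_sec_len()
+ */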
+ e->bitfield = cpu_to_be16(RUNNING << 13);
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = e;
+ }
+
+ return e;
+}
+
+void vidtv_psi_eit_event_destroy(struct vidtv_psi_table_eit_event *e)
+{
+ struct vidtv_psi_table_eit_event *tmp_e = NULL;
+ struct vidtv_psi_table_eit_event *curr_e = e;
+
+ while (curr_e) {
+ tmp_e = curr_e;
+ curr_e = curr_e->next;
+ vidtv_psi_desc_destroy(tmp_e->descriptor);
+ kfree(tmp_e);
+ }
+}
+
+void vidtv_psi_eit_table_destroy(struct vidtv_psi_table_eit *eit)
+{
+ vidtv_psi_eit_event_destroy(eit->event);
+ kfree(eit);
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.h b/drivers/media/test-drivers/vidtv/vidtv_psi.h
index 3f962cc..fdc825e 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_psi.h
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.h
@@ -6,10 +6,6 @@
* technically be broken into one or more sections, we do not do this here,
* hence 'table' and 'section' are interchangeable for vidtv.
*
- * This code currently supports three tables: PAT, PMT and SDT. These are the
- * bare minimum to get userspace to recognize our MPEG transport stream. It can
- * be extended to support more PSI tables in the future.
- *
* Copyright (C) 2020 Daniel W. S. Almeida
*/
@@ -17,7 +13,6 @@
#define VIDTV_PSI_H
#include <linux/types.h>
-#include <asm/byteorder.h>
/*
* all section lengths start immediately after the 'section_length' field
@@ -27,20 +22,28 @@
#define PAT_LEN_UNTIL_LAST_SECTION_NUMBER 5
#define PMT_LEN_UNTIL_PROGRAM_INFO_LENGTH 9
#define SDT_LEN_UNTIL_RESERVED_FOR_FUTURE_USE 8
+#define NIT_LEN_UNTIL_NETWORK_DESCRIPTOR_LEN 7
+#define EIT_LEN_UNTIL_LAST_TABLE_ID 11
#define MAX_SECTION_LEN 1021
+#define EIT_MAX_SECTION_LEN 4093 /* see ETSI 300 468 v.1.10.1 p. 26 */
#define VIDTV_PAT_PID 0 /* mandated by the specs */
#define VIDTV_SDT_PID 0x0011 /* mandated by the specs */
+#define VIDTV_NIT_PID 0x0010 /* mandated by the specs */
+#define VIDTV_EIT_PID 0x0012 /* mandated by the specs */
enum vidtv_psi_descriptors {
REGISTRATION_DESCRIPTOR = 0x05, /* See ISO/IEC 13818-1 section 2.6.8 */
+ NETWORK_NAME_DESCRIPTOR = 0x40, /* See ETSI EN 300 468 section 6.2.27 */
+ SERVICE_LIST_DESCRIPTOR = 0x41, /* See ETSI EN 300 468 section 6.2.35 */
SERVICE_DESCRIPTOR = 0x48, /* See ETSI EN 300 468 section 6.2.33 */
+ SHORT_EVENT_DESCRIPTOR = 0x4d, /* See ETSI EN 300 468 section 6.2.37 */
};
enum vidtv_psi_stream_types {
STREAM_PRIVATE_DATA = 0x06, /* see ISO/IEC 13818-1 2000 p. 48 */
};
-/**
+/*
* struct vidtv_psi_desc - A generic PSI descriptor type.
* The descriptor length is an 8-bit field specifying the total number of bytes of the data portion
* of the descriptor following the byte defining the value of this field.
@@ -52,7 +55,7 @@
u8 data[];
} __packed;
-/**
+/*
* struct vidtv_psi_desc_service - Service descriptor.
* See ETSI EN 300 468 section 6.2.33.
*/
@@ -68,7 +71,7 @@
char *service_name;
} __packed;
-/**
+/*
* struct vidtv_psi_desc_registration - A registration descriptor.
* See ISO/IEC 13818-1 section 2.6.8
*/
@@ -90,7 +93,56 @@
u8 additional_identification_info[];
} __packed;
-/**
+/*
+ * struct vidtv_psi_desc_network_name - A network name descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.27
+ */
+struct vidtv_psi_desc_network_name {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ char *network_name;
+} __packed;
+
+struct vidtv_psi_desc_service_list_entry {
+ __be16 service_id;
+ u8 service_type;
+ struct vidtv_psi_desc_service_list_entry *next;
+} __packed;
+
+/*
+ * struct vidtv_psi_desc_service_list - A service list descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.35
+ */
+struct vidtv_psi_desc_service_list {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ struct vidtv_psi_desc_service_list_entry *service_list;
+} __packed;
+
+/*
+ * struct vidtv_psi_desc_short_event - A short event descriptor
+ * see ETSI EN 300 468 v1.15.1 section 6.2.37
+ */
+struct vidtv_psi_desc_short_event {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ char *iso_language_code;
+ u8 event_name_len;
+ char *event_name;
+ u8 text_len;
+ char *text;
+} __packed;
+
+struct vidtv_psi_desc_short_event
+*vidtv_psi_short_event_desc_init(struct vidtv_psi_desc *head,
+ char *iso_language_code,
+ char *event_name,
+ char *text);
+
+/*
* struct vidtv_psi_table_header - A header that is present for all PSI tables.
*/
struct vidtv_psi_table_header {
@@ -106,7 +158,7 @@
u8 last_section; /* last_section_number */
} __packed;
-/**
+/*
* struct vidtv_psi_table_pat_program - A single program in the PAT
* See ISO/IEC 13818-1 : 2000 p.43
*/
@@ -116,17 +168,18 @@
struct vidtv_psi_table_pat_program *next;
} __packed;
-/**
+/*
* struct vidtv_psi_table_pat - The Program Allocation Table (PAT)
* See ISO/IEC 13818-1 : 2000 p.43
*/
struct vidtv_psi_table_pat {
struct vidtv_psi_table_header header;
- u16 programs; /* Included by libdvbv5, not part of the table and not actually serialized */
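+ /* Not part of the table and not serialized; bookkeeping only */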
+ u16 num_pat;
+ u16 num_pmt;
struct vidtv_psi_table_pat_program *program;
} __packed;
-/**