Merge tag 'usb-4.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
Pull USB fixes from Greg KH:
"Here are a handful of USB driver fixes for 4.14-rc5.
There is the "usual" usb-serial fixes and device ids, USB gadget
fixes, and some more fixes found by the fuzz testing that is happening
on the USB layer right now.
All of these have been in my tree this week with no reported issues"
* tag 'usb-4.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb:
usb: usbtest: fix NULL pointer dereference
usb: gadget: configfs: Fix memory leak of interface directory data
usb: gadget: composite: Fix use-after-free in usb_composite_overwrite_options
usb: misc: usbtest: Fix overflow in usbtest_do_ioctl()
usb: renesas_usbhs: Fix DMAC sequence for receiving zero-length packet
USB: dummy-hcd: Fix deadlock caused by disconnect detection
usb: phy: tegra: Fix phy suspend for UDC
USB: serial: console: fix use-after-free after failed setup
USB: serial: console: fix use-after-free on disconnect
USB: serial: qcserial: add Dell DW5818, DW5819
USB: serial: cp210x: add support for ELV TFD500
USB: serial: cp210x: fix partnum regression
USB: serial: option: add support for TP-Link LTE module
USB: serial: ftdi_sio: add id for Cypress WICED dev board
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52..9467201 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@
still used for tmpfs etc. other users. If set to
false, the global swap readahead algorithm will be
used for all swappable pages.
-
-What: /sys/kernel/mm/swap/vma_ra_max_order
-Date: August 2017
-Contact: Linux memory management mailing list <linux-mm@kvack.org>
-Description: The max readahead size in order for VMA based swap readahead
-
- VMA based swap readahead algorithm will readahead at
- most 1 << max_order pages for each readahead. The
- real readahead size for each readahead will be scaled
- according to the estimation algorithm.
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 57f52cd..9ba04c0 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2387,7 +2387,7 @@
and packet type ID), so in a "gatewayed" configuration, all
outgoing traffic will generally use the same device. Incoming
traffic may also end up on a single device, but that is
- dependent upon the balancing policy of the peer's 8023.ad
+ dependent upon the balancing policy of the peer's 802.3ad
implementation. In a "local" configuration, traffic will be
distributed across the devices in the bond.
diff --git a/MAINTAINERS b/MAINTAINERS
index 2d3d750..a74227a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5346,9 +5346,7 @@
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: include/linux/fcntl.h
-F: include/linux/fs.h
F: include/uapi/linux/fcntl.h
-F: include/uapi/linux/fs.h
F: fs/fcntl.c
F: fs/locks.c
@@ -5357,6 +5355,8 @@
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/*
+F: include/linux/fs.h
+F: include/uapi/linux/fs.h
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
@@ -7571,7 +7571,7 @@
F: arch/mips/kvm/
KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M: Alexander Graf <agraf@suse.com>
+M: Paul Mackerras <paulus@ozlabs.org>
L: kvm-ppc@vger.kernel.org
W: http://www.linux-kvm.org/
T: git git://github.com/agraf/linux-2.6.git
diff --git a/Makefile b/Makefile
index 2835863..5bf6fa4 100644
--- a/Makefile
+++ b/Makefile
@@ -933,7 +933,11 @@
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
- $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+ ifdef CONFIG_ORC_UNWINDER
+ $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+ else
+ $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+ endif
SKIP_STACK_VALIDATION := 1
export SKIP_STACK_VALIDATION
endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4e..d789a89 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -937,9 +937,6 @@
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. writing to text)
-config ARCH_WANT_RELAX_ORDER
- bool
-
config ARCH_HAS_REFCOUNT
bool
help
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 903f3bf..7e25c5c 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return __cmpxchg_small(ptr, old, new, size);
case 4:
- return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+ return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+ (u32)old, new);
case 8:
/* lld/scd are only available for MIPS64 */
if (!IS_ENABLED(CONFIG_64BIT))
return __cmpxchg_called_with_bad_pointer();
- return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+ return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+ (u64)old, new);
default:
return __cmpxchg_called_with_bad_pointer();
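The (u32)/(u64) casts above matter because MIPS64's ll instruction sign-extends the loaded 32-bit word into its 64-bit destination register, so the "expected" operand of a 4-byte cmpxchg has to be in the same sign-extended canonical form; otherwise a value with bit 31 set can never compare equal. A minimal userspace sketch of the mismatch on an LP64 host (illustrative only, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        /* "old" promoted to unsigned long is zero-extended (pre-fix situation) */
        unsigned long old = 0x80000000UL;
        /* what a MIPS64 ll leaves in the register: sign-extended */
        long loaded = (long)(int)0x80000000U;

        printf("64-bit compare: %s\n",
               (unsigned long)loaded == old ? "match" : "mismatch");
        /* truncate both sides to 32 bits, as the (u32) cast effectively does */
        printf("32-bit compare: %s\n",
               (unsigned int)loaded == (unsigned int)old ? "match" : "mismatch");
        return 0;
    }

On an LP64 host this prints "mismatch" then "match".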
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 100f23d..ac584c5 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
}
static struct plat_stmmacenet_data ls1x_eth0_pdata = {
- .bus_id = 0,
- .phy_addr = -1,
+ .bus_id = 0,
+ .phy_addr = -1,
#if defined(CONFIG_LOONGSON1_LS1B)
- .interface = PHY_INTERFACE_MODE_MII,
+ .interface = PHY_INTERFACE_MODE_MII,
#elif defined(CONFIG_LOONGSON1_LS1C)
- .interface = PHY_INTERFACE_MODE_RMII,
+ .interface = PHY_INTERFACE_MODE_RMII,
#endif
- .mdio_bus_data = &ls1x_mdio_bus_data,
- .dma_cfg = &ls1x_eth_dma_cfg,
- .has_gmac = 1,
- .tx_coe = 1,
- .init = ls1x_eth_mux_init,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .rx_queues_to_use = 1,
+ .tx_queues_to_use = 1,
+ .init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
#ifdef CONFIG_LOONGSON1_LS1B
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
- .bus_id = 1,
- .phy_addr = -1,
- .interface = PHY_INTERFACE_MODE_MII,
- .mdio_bus_data = &ls1x_mdio_bus_data,
- .dma_cfg = &ls1x_eth_dma_cfg,
- .has_gmac = 1,
- .tx_coe = 1,
- .init = ls1x_eth_mux_init,
+ .bus_id = 1,
+ .phy_addr = -1,
+ .interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+ .tx_coe = 1,
+ .rx_queues_to_use = 1,
+ .tx_queues_to_use = 1,
+ .init = ls1x_eth_mux_init,
};
static struct resource ls1x_eth1_resources[] = {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 192542d..16d9ef5 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2558,7 +2558,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
return SIGILL;
}
}
@@ -2719,7 +2718,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
return SIGILL;
}
}
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 7646891..01b7a87 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
{
int src, dst, r, td, ts, mem_off, b_off;
bool need_swap, did_move, cmp_eq;
- unsigned int target;
+ unsigned int target = 0;
u64 t64;
s64 t64s;
int bpf_op = BPF_OP(insn->code);
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
index 5c4f936..654d652 100755
--- a/arch/mips/tools/generic-board-config.sh
+++ b/arch/mips/tools/generic-board-config.sh
@@ -30,8 +30,6 @@
boards_origin="$5"
shift 5
-cd "${srctree}"
-
# Only print Skipping... lines if the user explicitly specified BOARDS=. In the
# general case it only serves to obscure the useful output about what actually
# was included.
@@ -48,7 +46,7 @@
esac
for board in $@; do
- board_cfg="arch/mips/configs/generic/board-${board}.config"
+ board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
if [ ! -f "${board_cfg}" ]; then
echo "WARNING: Board config '${board_cfg}' not found"
continue
@@ -84,7 +82,7 @@
done || continue
# Merge this board config fragment into our final config file
- ./scripts/kconfig/merge_config.sh \
+ ${srctree}/scripts/kconfig/merge_config.sh \
-m -O ${objtree} ${cfg} ${board_cfg} \
| grep -Ev '^(#|Using)'
done
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 48da0f5..b82586c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -734,7 +734,29 @@
EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
- EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ /*
+ * It's possible to receive a TM Bad Thing type program check with
+ * userspace register values (in particular r1), but with SRR1 reporting
+ * that we came from the kernel. Normally that would confuse the bad
+ * stack logic, and we would report a bad kernel stack pointer. Instead
+ * we switch to the emergency stack if we're taking a TM Bad Thing from
+ * the kernel.
+ */
+ li r10,MSR_PR /* Build a mask of MSR_PR .. */
+ oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
+ and r10,r10,r12 /* Mask SRR1 with that. */
+ srdi r10,r10,8 /* Shift it so we can compare */
+ cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
+ bne 1f /* If != go to normal path. */
+
+ /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
+ andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
+ /* 3 in EXCEPTION_PROLOG_COMMON */
+ mr r10,r1 /* Save r1 */
+ ld r1,PACAEMERGSP(r13) /* Use emergency stack */
+ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ b 3f /* Jump into the macro !! */
+1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c83c115..b2c0029 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
if (MSR_TM_RESV(msr))
return -EINVAL;
- /* pull in MSR TM from user context */
+ /* pull in MSR TS bits from user context */
regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ /*
+ * Ensure that TM is enabled in regs->msr before we leave the signal
+ * handler. It could be the case that (a) user disabled the TM bit
+ * through the manipulation of the MSR bits in uc_mcontext or (b) the
+ * TM bit was disabled because a sufficient number of context switches
+ * happened whilst in the signal handler and load_tm overflowed,
+ * disabling the TM bit. In either case we can end up with an illegal
+ * TM state leading to a TM Bad Thing when we return to userspace.
+ */
+ regs->msr |= MSR_TM;
+
/* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
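Both regs->msr updates use the standard clear-then-OR idiom for copying a selected bit field from one word into another. A self-contained sketch of the idiom (the mask below is a stand-in, not the real powerpc MSR layout):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in mask; the real MSR_TS_MASK comes from the powerpc headers */
    #define TS_MASK (3ULL << 33)

    /* copy only the bits selected by mask from src into dst */
    static uint64_t copy_bits(uint64_t dst, uint64_t src, uint64_t mask)
    {
        return (dst & ~mask) | (src & mask);
    }

    int main(void)
    {
        uint64_t regs_msr = 0x9000000000001000ULL;
        uint64_t user_msr = TS_MASK;    /* userspace set the TS bits */

        printf("%#llx\n",
               (unsigned long long)copy_bits(regs_msr, user_msr, TS_MASK));
        return 0;
    }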
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index c98e90b..b4e2b71 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -181,34 +181,25 @@
* - we have no stack frame and can not allocate one
* - LR points back to the original caller (in A)
* - CTR holds the new NIP in C
- * - r0 & r12 are free
- *
- * r0 can't be used as the base register for a DS-form load or store, so
- * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ * - r0, r11 & r12 are free
*/
livepatch_handler:
CURRENT_THREAD_INFO(r12, r1)
- /* Save stack pointer into r0 */
- mr r0, r1
-
/* Allocate 3 x 8 bytes */
- ld r1, TI_livepatch_sp(r12)
- addi r1, r1, 24
- std r1, TI_livepatch_sp(r12)
+ ld r11, TI_livepatch_sp(r12)
+ addi r11, r11, 24
+ std r11, TI_livepatch_sp(r12)
/* Save toc & real LR on livepatch stack */
- std r2, -24(r1)
+ std r2, -24(r11)
mflr r12
- std r12, -16(r1)
+ std r12, -16(r11)
/* Store stack end marker */
lis r12, STACK_END_MAGIC@h
ori r12, r12, STACK_END_MAGIC@l
- std r12, -8(r1)
-
- /* Restore real stack pointer */
- mr r1, r0
+ std r12, -8(r11)
/* Put ctr in r12 for global entry and branch there */
mfctr r12
@@ -216,36 +207,30 @@
/*
* Now we are returning from the patched function to the original
- * caller A. We are free to use r0 and r12, and we can use r2 until we
+ * caller A. We are free to use r11, r12 and we can use r2 until we
* restore it.
*/
CURRENT_THREAD_INFO(r12, r1)
- /* Save stack pointer into r0 */
- mr r0, r1
-
- ld r1, TI_livepatch_sp(r12)
+ ld r11, TI_livepatch_sp(r12)
/* Check stack marker hasn't been trashed */
lis r2, STACK_END_MAGIC@h
ori r2, r2, STACK_END_MAGIC@l
- ld r12, -8(r1)
+ ld r12, -8(r11)
1: tdne r12, r2
EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
/* Restore LR & toc from livepatch stack */
- ld r12, -16(r1)
+ ld r12, -16(r11)
mtlr r12
- ld r2, -24(r1)
+ ld r2, -24(r11)
/* Pop livepatch stack frame */
- CURRENT_THREAD_INFO(r12, r0)
- subi r1, r1, 24
- std r1, TI_livepatch_sp(r12)
-
- /* Restore real stack pointer */
- mr r1, r0
+ CURRENT_THREAD_INFO(r12, r1)
+ subi r11, r11, 24
+ std r11, TI_livepatch_sp(r12)
/* Return to original caller of live patched function */
blr
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5e8418c..f208f56 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
* Logical instructions
*/
case 26: /* cntlzw */
- op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+ val = (unsigned int) regs->gpr[rd];
+ op->val = ( val ? __builtin_clz(val) : 32 );
goto logical_done;
#ifdef __powerpc64__
case 58: /* cntlzd */
- op->val = __builtin_clzl(regs->gpr[rd]);
+ val = regs->gpr[rd];
+ op->val = ( val ? __builtin_clzl(val) : 64 );
goto logical_done;
#endif
case 28: /* and */
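The guards are needed because GCC documents __builtin_clz()/__builtin_clzl() as undefined for a zero argument, whereas the cntlzw/cntlzd instructions being emulated are architecturally defined to return the full register width for zero. A small userspace sketch of the guarded pattern:

    #include <stdio.h>

    static unsigned int emu_cntlzw(unsigned int val)
    {
        return val ? __builtin_clz(val) : 32;
    }

    static unsigned int emu_cntlzd(unsigned long long val)
    {
        return val ? __builtin_clzll(val) : 64;
    }

    int main(void)
    {
        printf("cntlzw(0) = %u, cntlzw(1) = %u\n", emu_cntlzw(0), emu_cntlzw(1));
        printf("cntlzd(0) = %u\n", emu_cntlzd(0));
        return 0;
    }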
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b95c584..a51df9e 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1438,7 +1438,6 @@ int numa_update_cpu_topology(bool cpus_locked)
int arch_update_cpu_topology(void)
{
- lockdep_assert_cpus_held();
return numa_update_cpu_topology(true);
}
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ccac86..8812624 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
/* Take the mutex lock for this node and then decrement the reference count */
mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ /*
+ * This is true when a perf session is started and then all
+ * cpus in a given node are offlined.
+ *
+ * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+ * sets ref->refc to zero if the cpu about to go offline is
+ * the last cpu in the node, and makes an OPAL call to
+ * disable the engine in that node.
+ */
+ mutex_unlock(&ref->lock);
+ return;
+ }
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
/* We need only vbase for core counters */
mem_info->vbase = page_address(alloc_pages_node(phys_id,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- get_order(size)));
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
return -ENOMEM;
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
return;
mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ /*
+ * This is true when a perf session is started and then all
+ * cpus in a given core are offlined.
+ *
+ * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+ * sets ref->refc to zero if the cpu about to go offline is
+ * the last cpu in the core, and makes an OPAL call to
+ * disable the engine in that core.
+ */
+ mutex_unlock(&ref->lock);
+ return;
+ }
ref->refc--;
if (ref->refc == 0) {
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
* free the memory in cpu offline path.
*/
local_mem = page_address(alloc_pages_node(phys_id,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- get_order(size)));
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
if (!local_mem)
return -ENOMEM;
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
}
/* Only free the attr_groups which are dynamically allocated */
- kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+ if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+ kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
kfree(pmu_ptr);
return;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0be3828..4e83f95 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -44,7 +44,6 @@
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select LOCKDEP_SMALL if LOCKDEP
- select ARCH_WANT_RELAX_ORDER
config SPARC32
def_bool !64BIT
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 8a13d46..50e0d2b 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -176,7 +176,7 @@
/*
* This is a sneaky trick to help the unwinder find pt_regs on the stack. The
* frame pointer is replaced with an encoded pointer to pt_regs. The encoding
- * is just setting the LSB, which makes it an invalid stack address and is also
+ * is just clearing the MSB, which makes it an invalid stack address and is also
* a signal to the unwinder that it's a pt_regs pointer in disguise.
*
* NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@@ -185,7 +185,7 @@
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
mov %esp, %ebp
- orl $0x1, %ebp
+ andl $0x7fffffff, %ebp
#endif
.endm
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 1c5390f..d45e063 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
pmus[i].type = type;
pmus[i].boxes = kzalloc(size, GFP_KERNEL);
if (!pmus[i].boxes)
- return -ENOMEM;
+ goto err;
}
type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
sizeof(*attr_group), GFP_KERNEL);
if (!attr_group)
- return -ENOMEM;
+ goto err;
attrs = (struct attribute **)(attr_group + 1);
attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
}
type->pmu_group = &uncore_pmu_attr_group;
+
return 0;
+
+err:
+ for (i = 0; i < type->num_boxes; i++)
+ kfree(pmus[i].boxes);
+ kfree(pmus);
+
+ return -ENOMEM;
}
static int __init
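The fix replaces the bare "return -ENOMEM" statements with "goto err" so that boxes allocated in earlier loop iterations are freed on failure. A userspace sketch of that unwind-on-error idiom (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static int alloc_boxes(void **boxes, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            boxes[i] = calloc(1, 64);
            if (!boxes[i])
                goto err;    /* unwind instead of leaking */
        }
        return 0;

    err:
        while (--i >= 0)
            free(boxes[i]);
        return -1;    /* -ENOMEM in the kernel version */
    }

    int main(void)
    {
        void *boxes[4];
        int i;

        if (alloc_boxes(boxes, 4) == 0) {
            printf("allocated all boxes\n");
            for (i = 0; i < 4; i++)
                free(boxes[i]);
        }
        return 0;
    }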
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 1a8eb55..a5db63f 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);
+u32 hv_max_vp_index;
+
static int hv_cpu_init(unsigned int cpu)
{
u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
hv_vp_index[smp_processor_id()] = msr_vp_index;
+ if (msr_vp_index > hv_max_vp_index)
+ hv_max_vp_index = msr_vp_index;
+
return 0;
}
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 39e7f6e..9cc9e1c 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ /* valid_bank_mask can represent up to 64 banks */
+ if (hv_max_vp_index / 64 >= 64)
+ return 0;
+
+ /*
+ * Clear all banks up to the maximum possible bank: hv_flush_pcpu_ex
+ * structs are not cleared between calls, so we would otherwise risk
+ * flushing unneeded vCPUs.
+ */
+ for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+ flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
/*
* Some banks may end up being empty but this is acceptable.
*/
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
vcpu = hv_cpu_number_to_vp_number(cpu);
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
-
- /* valid_bank_mask can represent up to 64 banks */
- if (vcpu_bank >= 64)
- return 0;
-
__set_bit(vcpu_offset, (unsigned long *)
&flush->hv_vp_set.bank_contents[vcpu_bank]);
if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int cpu, vcpu, gva_n, max_gvas;
+ struct hv_flush_pcpu **flush_pcpu;
struct hv_flush_pcpu *flush;
u64 status = U64_MAX;
unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
local_irq_save(flags);
- flush = this_cpu_ptr(pcpu_flush);
+ flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+ if (unlikely(!*flush_pcpu))
+ *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+ flush = *flush_pcpu;
+
+ if (unlikely(!flush)) {
+ local_irq_restore(flags);
+ goto do_native;
+ }
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
+ struct hv_flush_pcpu_ex **flush_pcpu;
struct hv_flush_pcpu_ex *flush;
u64 status = U64_MAX;
unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
local_irq_save(flags);
- flush = this_cpu_ptr(pcpu_flush_ex);
+ flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+ if (unlikely(!*flush_pcpu))
+ *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+ flush = *flush_pcpu;
+
+ if (unlikely(!flush)) {
+ local_irq_restore(flags);
+ goto do_native;
+ }
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
- 0, nr_bank + 2, flush, NULL);
+ 0, nr_bank, flush, NULL);
} else if (info->end &&
((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
- 0, nr_bank + 2, flush, NULL);
+ 0, nr_bank, flush, NULL);
} else {
gva_n = fill_gva_list(flush->gva_list, nr_bank,
info->start, info->end);
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
- gva_n, nr_bank + 2, flush, NULL);
+ gva_n, nr_bank, flush, NULL);
}
local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
return;
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+ pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
else
- pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+ pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
}
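The structural change here: instead of a preallocated per-cpu page, each per-cpu slot now holds a pointer that is filled in on first use with GFP_ATOMIC (the flush runs with interrupts off), and the caller falls back to the native flush when the allocation fails. A userspace sketch of that lazy-allocate-with-fallback shape, using a per-thread pointer as a stand-in for the per-cpu slot (names are illustrative, not the real Hyper-V symbols):

    #include <stdio.h>
    #include <stdlib.h>

    struct flush_buf { char data[4096]; };

    static __thread struct flush_buf *buf;   /* stand-in for the per-cpu slot */

    static void flush_example(void)
    {
        if (!buf)
            buf = malloc(sizeof(*buf));      /* GFP_ATOMIC analogue */

        if (!buf) {
            puts("fallback: native flush");  /* the "goto do_native" path */
            return;
        }

        puts("fast path: fill buffer, issue hypercall");
    }

    int main(void)
    {
        flush_example();
        flush_example();    /* second call reuses the lazily allocated buffer */
        return 0;
    }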
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e7636ba..6c98821 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -62,8 +62,10 @@
#define new_len2 145f-144f
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
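For reference, the C-truth-value counterpart of this macro needs only a single negation, because C comparisons evaluate to 1 while gas comparisons evaluate to -1 (hence the extra "-" above). A runnable sketch of the branchless max:

    #include <stdio.h>

    /* (a < b) is 1 in C, so one negation yields the all-ones mask */
    #define MAX_BRANCHLESS(a, b) ((a) ^ (((a) ^ (b)) & -((a) < (b))))

    int main(void)
    {
        printf("%d %d\n", MAX_BRANCHLESS(3, 7), MAX_BRANCHLESS(9, 2));
        /* prints: 7 9 */
        return 0;
    }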
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c096624..ccbe24e 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
alt_end_marker ":\n"
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
-#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 1812649..8edac1d 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -187,7 +187,6 @@ struct mca_msr_regs {
extern struct mce_vendor_flags mce_flags;
-extern struct mca_config mca_cfg;
extern struct mca_msr_regs msr_ops;
enum mce_notifier_prios {
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index c120b5d..3c856a1 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
DEBUG_LOCKS_WARN_ON(preemptible());
}
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
- int cpu = smp_processor_id();
-
- if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_clear_cpu(cpu, mm_cpumask(mm));
-}
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 738503e..530f448 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
* to this information.
*/
extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4893abf..d362161 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -83,6 +83,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
#endif
/*
+ * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
+ * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
+ * it's false, then we immediately switch CR3 when entering a kernel thread.
+ */
+DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
+/*
* 6 because 6 should be plenty and struct tlb_state will fit in
* two cache lines.
*/
@@ -105,6 +112,23 @@ struct tlb_state {
u16 next_asid;
/*
+ * We can be in one of several states:
+ *
+ * - Actively using an mm. Our CPU's bit will be set in
+ * mm_cpumask(loaded_mm) and is_lazy == false;
+ *
+ * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
+ * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+ *
+ * - Lazily using a real mm. loaded_mm != &init_mm, our bit
+ * is set in mm_cpumask(loaded_mm), but is_lazy == true.
+ * We're heuristically guessing that the CR3 load we
+ * skipped more than makes up for the overhead added by
+ * lazy mode.
+ */
+ bool is_lazy;
+
+ /*
* Access to this CR4 shadow and to H/W CR4 is protected by
* disabling interrupts when modifying either one.
*/
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d705c76..ff89177 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
return ~0U;
}
+static u32 skx_deadline_rev(void)
+{
+ switch (boot_cpu_data.x86_mask) {
+ case 0x03: return 0x01000136;
+ case 0x04: return 0x02000014;
+ }
+
+ return ~0U;
+}
+
static const struct x86_cpu_id deadline_match[] = {
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014),
+ DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
const struct x86_cpu_id *m;
u32 rev;
- if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+ boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
m = x86_match_cpu(deadline_match);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 098530a..debb974 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,3 +1,6 @@
+#ifndef __X86_MCE_INTERNAL_H__
+#define __X86_MCE_INTERNAL_H__
+
#include <linux/device.h>
#include <asm/mce.h>
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif
+
+extern struct mca_config mca_cfg;
+
+#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 40e28ed..486f640 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -28,6 +28,8 @@
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
+#include "mce-internal.h"
+
#define NR_BLOCKS 5
#define THRESHOLD_MAX 0xFFF
#define INT_TYPE_APIC 0x00020000
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 86e8f0b..c4fa4a8 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
bool *res = &dis_ucode_ldr;
#endif
- if (!have_cpuid_p())
- return *res;
-
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
void __init load_ucode_bsp(void)
{
unsigned int cpuid_1_eax;
+ bool intel = true;
- if (check_loader_disabled_bsp())
+ if (!have_cpuid_p())
return;
cpuid_1_eax = native_cpuid_eax(1);
switch (x86_cpuid_vendor()) {
case X86_VENDOR_INTEL:
- if (x86_family(cpuid_1_eax) >= 6)
- load_ucode_intel_bsp();
+ if (x86_family(cpuid_1_eax) < 6)
+ return;
break;
+
case X86_VENDOR_AMD:
- if (x86_family(cpuid_1_eax) >= 0x10)
- load_ucode_amd_bsp(cpuid_1_eax);
+ if (x86_family(cpuid_1_eax) < 0x10)
+ return;
+ intel = false;
break;
+
default:
- break;
+ return;
}
+
+ if (check_loader_disabled_bsp())
+ return;
+
+ if (intel)
+ load_ucode_intel_bsp();
+ else
+ load_ucode_amd_bsp(cpuid_1_eax);
}
static bool check_loader_disabled_ap(void)
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d..3fc0f9a 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -3,6 +3,15 @@
/* Kprobes and Optprobes common header */
+#include <asm/asm.h>
+
+#ifdef CONFIG_FRAME_POINTER
+# define SAVE_RBP_STRING " push %" _ASM_BP "\n" \
+ " mov %" _ASM_SP ", %" _ASM_BP "\n"
+#else
+# define SAVE_RBP_STRING " push %" _ASM_BP "\n"
+#endif
+
#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING \
/* Skip cs, ip, orig_ax. */ \
@@ -17,7 +26,7 @@
" pushq %r10\n" \
" pushq %r11\n" \
" pushq %rbx\n" \
- " pushq %rbp\n" \
+ SAVE_RBP_STRING \
" pushq %r12\n" \
" pushq %r13\n" \
" pushq %r14\n" \
@@ -48,7 +57,7 @@
" pushl %es\n" \
" pushl %ds\n" \
" pushl %eax\n" \
- " pushl %ebp\n" \
+ SAVE_RBP_STRING \
" pushl %edi\n" \
" pushl %esi\n" \
" pushl %edx\n" \
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f015371..0742491 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* raw stack chunk with redzones:
*/
__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
- regs->flags &= ~X86_EFLAGS_IF;
- trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
/*
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 54180fa..add33f6 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
load_cr3(initial_page_table);
#else
write_cr3(real_mode_header->trampoline_pgd);
+
+ /* Exiting long mode will fail if CR4.PCIDE is set. */
+ if (static_cpu_has(X86_FEATURE_PCID))
+ cr4_clear_bits(X86_CR4_PCIDE);
#endif
/* Jump to the identity-mapped low memory code */
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index d145a0b..3dc26f9 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
state->stack_info.type, state->stack_info.next_sp,
state->stack_mask, state->graph_idx);
- for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+ for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
+ sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
break;
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
* This determines if the frame pointer actually contains an encoded pointer to
* pt_regs on the stack. See ENCODE_FRAME_POINTER.
*/
+#ifdef CONFIG_X86_64
static struct pt_regs *decode_frame_pointer(unsigned long *bp)
{
unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
return (struct pt_regs *)(regs & ~0x1);
}
+#else
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+ unsigned long regs = (unsigned long)bp;
+
+ if (regs & 0x80000000)
+ return NULL;
+
+ return (struct pt_regs *)(regs | 0x80000000);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
+#else
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
+#endif
static bool update_stack_state(struct unwind_state *state,
unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
regs = decode_frame_pointer(next_bp);
if (regs) {
frame = (unsigned long *)regs;
- len = regs_size(regs);
+ len = KERNEL_REGS_SIZE;
state->got_irq = true;
} else {
frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
frame < prev_frame_end)
return false;
+ /*
+ * On 32-bit with user mode regs, make sure the last two regs are safe
+ * to access:
+ */
+ if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
+ !on_stack(info, frame, len + 2*sizeof(long)))
+ return false;
+
/* Move state to the next frame: */
if (regs) {
state->regs = regs;
@@ -328,6 +355,13 @@ bool unwind_next_frame(struct unwind_state *state)
state->regs->sp < (unsigned long)task_pt_regs(state->task))
goto the_end;
+ /*
+ * There are some known frame pointer issues on 32-bit. Disable
+ * unwinder warnings on 32-bit until it gets objtool support.
+ */
+ if (IS_ENABLED(CONFIG_X86_32))
+ goto the_end;
+
if (state->regs) {
printk_deferred_once(KERN_WARNING
"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 106d4a0..7a69cf0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
unsigned level, unsigned gpte)
{
/*
- * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
- * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
- * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
- */
- gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
- /*
* The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
* If it is clear, there are no large pages at this level, so clear
* PT_PAGE_SIZE_MASK in gpte if that is the case.
*/
gpte &= level - mmu->last_nonleaf_level;
+ /*
+ * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
+ * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+ * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
return gpte & PT_PAGE_SIZE_MASK;
}
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
+ update_last_nonleaf_level(vcpu, context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
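The reorder matters because both statements communicate through bit 7 (PT_PAGE_SIZE_MASK): the OR must come last so that level == PT_PAGE_TABLE_LEVEL is always reported as a leaf, even when the AND would clear bit 7. A small sketch demonstrating the difference, assuming PT_PAGE_TABLE_LEVEL == 1 and bit 7 as described in the comments above (illustrative, not the real KVM definitions):

    #include <stdio.h>

    #define PT_PAGE_TABLE_LEVEL 1
    #define PT_PAGE_SIZE_MASK   0x80u

    static unsigned int old_order(unsigned int gpte, unsigned int level,
                                  unsigned int last_nonleaf)
    {
        gpte |= level - PT_PAGE_TABLE_LEVEL - 1;  /* OR first (pre-fix) */
        gpte &= level - last_nonleaf;             /* AND can clear bit 7 */
        return gpte & PT_PAGE_SIZE_MASK;
    }

    static unsigned int new_order(unsigned int gpte, unsigned int level,
                                  unsigned int last_nonleaf)
    {
        gpte &= level - last_nonleaf;             /* AND first */
        gpte |= level - PT_PAGE_TABLE_LEVEL - 1;  /* then force level 1 */
        return gpte & PT_PAGE_SIZE_MASK;
    }

    int main(void)
    {
        /* level 1 must always terminate; with last_nonleaf_level == 1 the
         * old ordering wrongly reports "not a leaf" */
        printf("old: %#x, new: %#x\n", old_order(0, 1, 1), new_order(0, 1, 1));
        return 0;
    }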
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 86b68dc..f18d1f8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
--walker->level;
index = PT_INDEX(addr, walker->level);
-
table_gfn = gpte_to_gfn(pte);
offset = index * sizeof(pt_element_t);
pte_gpa = gfn_to_gpa(table_gfn) + offset;
+
+ BUG_ON(walker->level < 1);
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a2b804e..95a0160 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
- kvm_set_cr4(vcpu, vmcs12->host_cr4);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
nested_ept_uninit_mmu_context(vcpu);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 72bf8c0..e1f0958 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,12 @@
-# Kernel does not boot with instrumentation of tlb.c.
-KCOV_INSTRUMENT_tlb.o := n
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
+KCOV_INSTRUMENT_tlb.o := n
+KCOV_INSTRUMENT_mem_encrypt.o := n
+
+KASAN_SANITIZE_mem_encrypt.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_mem_encrypt.o = -pg
+endif
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o setup_nx.o tlb.o
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49d9778..658bf00 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,6 +30,8 @@
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush)
{
@@ -80,7 +82,7 @@ void leave_mm(int cpu)
return;
/* Warn if we're not lazy. */
- WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
+ WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
switch_mm(NULL, &init_mm, NULL);
}
@@ -142,45 +144,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
__flush_tlb_all();
}
#endif
+ this_cpu_write(cpu_tlbstate.is_lazy, false);
if (real_prev == next) {
VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
next->context.ctx_id);
- if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
- /*
- * There's nothing to do: we weren't lazy, and we
- * aren't changing our mm. We don't need to flush
- * anything, nor do we need to update CR3, CR4, or
- * LDTR.
- */
- return;
- }
-
- /* Resume remote flushes and then read tlb_gen. */
- cpumask_set_cpu(cpu, mm_cpumask(next));
- next_tlb_gen = atomic64_read(&next->context.tlb_gen);
-
- if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
- next_tlb_gen) {
- /*
- * Ideally, we'd have a flush_tlb() variant that
- * takes the known CR3 value as input. This would
- * be faster on Xen PV and on hypothetical CPUs
- * on which INVPCID is fast.
- */
- this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
- next_tlb_gen);
- write_cr3(build_cr3(next, prev_asid));
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
- TLB_FLUSH_ALL);
- }
-
/*
- * We just exited lazy mode, which means that CR4 and/or LDTR
- * may be stale. (Changes to the required CR4 and LDTR states
- * are not reflected in tlb_gen.)
+ * We don't currently support having a real mm loaded without
+ * our cpu set in mm_cpumask(). We have all the bookkeeping
+ * in place to figure out whether we would need to flush
+ * if our cpu were cleared in mm_cpumask(), but we don't
+ * currently use it.
*/
+ if (WARN_ON_ONCE(real_prev != &init_mm &&
+ !cpumask_test_cpu(cpu, mm_cpumask(next))))
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ return;
} else {
u16 new_asid;
bool need_flush;
@@ -199,10 +180,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/* Stop remote flushes for the previous mm */
- if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
-
- VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+ VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+ real_prev != &init_mm);
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
/*
* Start remote flushes and then read tlb_gen.
@@ -233,6 +213,37 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
}
/*
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm. Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row. It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+ return;
+
+ if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+ /*
+ * There's a significant optimization that may be possible
+ * here. We have accurate enough TLB flush tracking that we
+ * don't need to maintain coherence of TLB per se when we're
+ * lazy. We do, however, need to maintain coherence of
+ * paging-structure caches. We could, in principle, leave our
+ * old mm loaded and only switch to init_mm when
+ * tlb_remove_page() happens.
+ */
+ this_cpu_write(cpu_tlbstate.is_lazy, true);
+ } else {
+ switch_mm(NULL, &init_mm, NULL);
+ }
+}
+
+/*
* Call this when reinitializing a CPU. It fixes the following potential
* problems:
*
@@ -303,16 +314,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
+ if (unlikely(loaded_mm == &init_mm))
+ return;
+
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+ if (this_cpu_read(cpu_tlbstate.is_lazy)) {
/*
- * We're in lazy mode -- don't flush. We can get here on
- * remote flushes due to races and on local flushes if a
- * kernel thread coincidentally flushes the mm it's lazily
- * still using.
+ * We're in lazy mode. We need to at least flush our
+ * paging-structure cache to avoid speculatively reading
+ * garbage into our TLB. Since switching to init_mm is barely
+ * slower than a minimal flush, just switch to init_mm.
*/
+ switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
}
@@ -611,3 +626,57 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
+
+static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[2];
+
+ buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
+ buf[1] = '\n';
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t tlblazy_write_file(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ bool val;
+
+ if (kstrtobool_from_user(user_buf, count, &val))
+ return -EINVAL;
+
+ if (val)
+ static_branch_enable(&tlb_use_lazy_mode);
+ else
+ static_branch_disable(&tlb_use_lazy_mode);
+
+ return count;
+}
+
+static const struct file_operations fops_tlblazy = {
+ .read = tlblazy_read_file,
+ .write = tlblazy_write_file,
+ .llseek = default_llseek,
+};
+
+static int __init init_tlb_use_lazy_mode(void)
+{
+ if (boot_cpu_has(X86_FEATURE_PCID)) {
+ /*
+ * Heuristic: with PCID on, switching to and from
+ * init_mm is reasonably fast, but remote flush IPIs are
+ * as expensive as ever, so turn off lazy TLB mode.
+ *
+ * We can't do this in setup_pcid() because static keys
+ * haven't been initialized yet, and it would blow up
+ * badly.
+ */
+ static_branch_disable(&tlb_use_lazy_mode);
+ }
+
+ debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
+ arch_debugfs_dir, NULL, &fops_tlblazy);
+ return 0;
+}
+late_initcall(init_tlb_use_lazy_mode);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0e7ef69..d669e9d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
int rc;
rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
- "x86/xen/hvm_guest:prepare",
+ "x86/xen/guest:prepare",
cpu_up_prepare_cb, cpu_dead_cb);
if (rc >= 0) {
rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "x86/xen/hvm_guest:online",
+ "x86/xen/guest:online",
xen_cpu_up_online, NULL);
if (rc < 0)
cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/block/bio.c b/block/bio.c
index b38e962..101c2a9 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
bmd->is_our_pages = map_data ? 0 : 1;
memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
- iov_iter_init(&bmd->iter, iter->type, bmd->iov,
- iter->nr_segs, iter->count);
+ bmd->iter = *iter;
+ bmd->iter.iov = bmd->iov;
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
int ret, offset;
struct iov_iter i;
struct iovec iov;
+ struct bio_vec *bvec;
iov_for_each(iov, i, *iter) {
unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
ret = get_user_pages_fast(uaddr, local_nr_pages,
(iter->type & WRITE) != WRITE,
&pages[cur_page]);
- if (ret < local_nr_pages) {
+ if (unlikely(ret < local_nr_pages)) {
+ for (j = cur_page; j < page_limit; j++) {
+ if (!pages[j])
+ break;
+ put_page(pages[j]);
+ }
ret = -EFAULT;
goto out_unmap;
}
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
offset = offset_in_page(uaddr);
for (j = cur_page; j < page_limit; j++) {
unsigned int bytes = PAGE_SIZE - offset;
+ unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (len <= 0)
break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
bytes)
break;
+ /*
+ * Check if the vector was merged with the previous
+ * one; drop the page reference if so.
+ */
+ if (bio->bi_vcnt == prev_bi_vcnt)
+ put_page(pages[j]);
+
len -= bytes;
offset = 0;
}
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
- for (j = 0; j < nr_pages; j++) {
- if (!pages[j])
- break;
- put_page(pages[j]);
+ bio_for_each_segment_all(bvec, bio, j) {
+ put_page(bvec->bv_page);
}
out:
kfree(pages);
diff --git a/crypto/shash.c b/crypto/shash.c
index 5e31c8d..325a14d 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
int err;
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- buffer = kmalloc(absize, GFP_KERNEL);
+ buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
- struct scatterlist *sg = req->src;
- unsigned int offset = sg->offset;
unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
int err;
- if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+ if (nbytes &&
+ (sg = req->src, offset = sg->offset,
+ nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4faa0fd..d5692e3 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
static int skcipher_walk_first(struct skcipher_walk *walk)
{
- walk->nbytes = 0;
-
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
- if (unlikely(!walk->total))
- return 0;
-
walk->buffer = NULL;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ walk->total = req->cryptlen;
+ walk->nbytes = 0;
+
+ if (unlikely(!walk->total))
+ return 0;
+
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->total = req->cryptlen;
walk->iv = req->iv;
walk->oiv = req->iv;
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int err;
+ walk->nbytes = 0;
+
+ if (unlikely(!walk->total))
+ return 0;
+
walk->flags &= ~SKCIPHER_WALK_PHYS;
scatterwalk_start(&walk->in, req->src);
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a..e31828e 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx->name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
+ "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_drop_spawn;
+ }
} else
goto err_drop_spawn;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3fb8ff5..e26ea20 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* }
* }
*
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain value %0.
*
* Return: %0 on success, negative error code on failure.
*/
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
data = acpi_device_data_of_node(fwnode);
if (!data)
- return -EINVAL;
+ return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
- return ret;
+ return ret == -EINVAL ? -ENOENT : -EINVAL;
/*
* The simplest case is when the value is a single reference. Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(obj->reference.handle, &device);
if (ret)
- return ret;
+ return ret == -ENODEV ? -EINVAL : ret;
args->adev = device;
args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
* The index argument is then used to determine which reference
* the caller wants (along with the arguments).
*/
- if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
- return -EPROTO;
+ if (obj->type != ACPI_TYPE_PACKAGE)
+ return -EINVAL;
+ if (index >= obj->package.count)
+ return -ENOENT;
element = obj->package.elements;
end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(element->reference.handle,
&device);
if (ret)
- return -ENODEV;
+ return -EINVAL;
nargs = 0;
element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
else if (type == ACPI_TYPE_LOCAL_REFERENCE)
break;
else
- return -EPROTO;
+ return -EINVAL;
}
if (nargs > MAX_ACPI_REFERENCE_ARGS)
- return -EPROTO;
+ return -EINVAL;
if (idx == index) {
args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
element++;
} else {
- return -EPROTO;
+ return -EINVAL;
}
idx++;
}
- return -ENODATA;
+ return -ENOENT;
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3855902..aae2402 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
+ ssize_t n;
+ cpumask_var_t mask;
struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
- return cpumap_print_to_pagebuf(list, buf, mask);
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return 0;
+
+ cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+ n = cpumap_print_to_pagebuf(list, buf, mask);
+ free_cpumask_var(mask);
+
+ return n;
}
static inline ssize_t node_read_cpumask(struct device *dev,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d0b65bb..7ed99c1 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,6 +21,7 @@
#include <linux/phy.h>
struct property_set {
+ struct device *dev;
struct fwnode_handle fwnode;
const struct property_entry *properties;
};
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* Caller is responsible to call fwnode_handle_put() on the returned
* args->fwnode pointer.
*
+ * Returns: %0 on success
+ * %-ENOENT when the index is out of bounds, the index has an empty
+ * reference or the property was not found
+ * %-EINVAL on parse error
*/
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
+ struct property_set *pset;
fwnode = dev_fwnode(dev);
if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
- if (is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode);
+ if (pset) {
set_primary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
} else {
- fwnode = fwnode->secondary;
- if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode->secondary);
+ if (pset && dev == pset->dev)
set_secondary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
- }
}
+ if (pset && dev == pset->dev)
+ pset_free_set(pset);
}
EXPORT_SYMBOL_GPL(device_remove_properties);
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
+ p->dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index d9fbbf0..0f9754e 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;
-static struct dentry *dbgfs_root;
-
#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
char *desc;
};
+static struct dentry *dbgfs_root;
+
static void artpec6_crypto_init_debugfs(void)
{
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index b585ce5..4835dd4 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct scatterlist sg[1], *tsg;
- int err = 0, len = 0, reg, ncp;
+ int err = 0, len = 0, reg, ncp = 0;
unsigned int i;
- const u32 *buffer = (const u32 *)rctx->buffer;
+ u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
reg |= HASH_CR_DMAA;
stm32_hash_write(hdev, HASH_CR, reg);
- for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
- stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-
- stm32_hash_set_nblw(hdev, ncp);
+ if (ncp) {
+ memset(buffer + ncp, 0,
+ DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+ writesl(hdev->io_base + HASH_DIN, buffer,
+ DIV_ROUND_UP(ncp, sizeof(u32)));
+ }
+ stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 66fb40d..0383063 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -383,7 +383,7 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
return err;
}
-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) :
ktime_set(0, 0);
+
+ return info->status;
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* sync_fence_info and return the actual number of fences on
* info->num_fences.
*/
- if (!info.num_fences)
+ if (!info.num_fences) {
+ info.status = dma_fence_is_signaled(sync_file->fence);
goto no_fences;
+ } else {
+ info.status = 1;
+ }
if (info.num_fences < num_fences)
return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < num_fences; i++)
- sync_fill_fence_info(fences[i], &fence_info[i]);
+ for (i = 0; i < num_fences; i++) {
+ int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+ info.status = info.status <= 0 ? info.status : status;
+ }
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
sync_file_get_name(sync_file, info.name, sizeof(info.name));
- info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
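The status aggregation above follows sync_file's convention: a fence status is negative on error, 0 while pending and 1 once signaled, and the merged value keeps the first error or pending fence it sees. A minimal user-space sketch of the same fold (the sample values are hypothetical, not driver output):

	#include <stdio.h>

	/* Fold per-fence statuses the way the patch does: start from 1
	 * (signaled) and let the first error (< 0) or pending fence (0)
	 * stick for the rest of the loop. */
	static int fold_status(const int *status, int n)
	{
		int acc = 1;

		for (int i = 0; i < n; i++)
			acc = acc <= 0 ? acc : status[i];
		return acc;
	}

	int main(void)
	{
		int all_signaled[] = { 1, 1, 1 };
		int one_pending[]  = { 1, 0, 1 };
		int one_error[]    = { 1, -22, 0 };

		printf("%d %d %d\n",
		       fold_status(all_signaled, 3),	/* 1 */
		       fold_status(one_pending, 3),	/* 0 */
		       fold_status(one_error, 3));	/* -22 */
		return 0;
	}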
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 32905d5..339186f 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc;
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list);
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new;
dma_cookie_t cookie;
+ unsigned long flags;
new = tx_to_desc(tx);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
return cookie;
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc;
size_t copy;
u32 desc_cnt;
+ unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
do {
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i;
struct scatterlist *sg;
u32 stride;
+ unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl);
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan)
{
struct msgdma_device *mdev = to_mdev(chan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
struct msgdma_device *mdev = to_mdev(dchan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq);
}
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
+ unsigned long flags;
- spin_lock(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
* bits. So we need to just drop these values.
*/
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
- status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+ status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
}
- spin_unlock(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3879f80..a7ea20e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
- unsigned int width, pset_len;
+ unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
+ /* Align the array size (acnt block) with the transfer properties */
+ switch (__ffs((src | dest | len))) {
+ case 0:
+ array_size = SZ_32K - 1;
+ break;
+ case 1:
+ array_size = SZ_32K - 2;
+ break;
+ default:
+ array_size = SZ_32K - 4;
+ break;
+ }
+
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is a multiple of 32767 one slot can be
* used to complete the transfer.
*/
- width = SZ_32K - 1;
+ width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
dest += pset_len;
src += pset_len;
- pset_len = width = len % (SZ_32K - 1);
+ pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
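The array-size selection added above trims the 32767-element hardware limit down to the largest value matching the common byte alignment of src, dest and len. A standalone sketch of the same computation, using __builtin_ctzl as a stand-in for the kernel's __ffs():

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define SZ_32K 0x8000

	/* The alignment of (src | dest | len) decides whether the ~32K
	 * aCNT block must be a multiple of 1, 2 or 4 bytes; pick the
	 * largest size under the limit that satisfies it. */
	static unsigned int pick_array_size(uintptr_t src, uintptr_t dest,
					    size_t len)
	{
		switch (__builtin_ctzl(src | dest | len)) {
		case 0:				/* only byte aligned */
			return SZ_32K - 1;
		case 1:				/* 2-byte aligned */
			return SZ_32K - 2;
		default:			/* 4-byte aligned or better */
			return SZ_32K - 4;
		}
	}

	int main(void)
	{
		printf("%u\n", pick_array_size(0x1001, 0x2000, 100)); /* 32767 */
		printf("%u\n", pick_array_size(0x1002, 0x2000, 100)); /* 32766 */
		printf("%u\n", pick_array_size(0x1000, 0x2000, 100)); /* 32764 */
		return 0;
	}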
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2f65a8f..f1d04b7 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_lock(&xbar->mutex);
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
xbar->dma_requests);
- mutex_unlock(&xbar->mutex);
if (map->xbar_out == xbar->dma_requests) {
+ mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
+ mutex_unlock(&xbar->mutex);
map->xbar_in = (u16)dma_spec->args[0];
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3388d54..3f80f16 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,7 +453,8 @@
config GPIO_THUNDERX
tristate "Cavium ThunderX/OCTEON-TX GPIO"
depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
- depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+ depends on PCI_MSI
+ select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS
help
Say yes here to support the on-chip GPIO lines on the ThunderX
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dbf869f..3233b72 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- irq_set_handler_locked(d, handle_edge_irq);
+ /*
+ * Edge IRQs are already cleared/acked in irq_handler and
+ * do not need to be masked; as a result, the handle_edge_irq()
+ * logic is redundant here and may cause interrupts to be lost.
+ * So just use handle_simple_irq.
+ */
+ irq_set_handler_locked(d, handle_simple_irq);
return 0;
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
void __iomem *isr_reg = NULL;
- u32 isr;
+ u32 enabled, isr, level_mask;
unsigned int bit;
struct gpio_bank *bank = gpiobank;
unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
pm_runtime_get_sync(bank->chip.parent);
while (1) {
- u32 isr_saved, level_mask = 0;
- u32 enabled;
-
raw_spin_lock_irqsave(&bank->lock, lock_flags);
enabled = omap_get_gpio_irqbank_mask(bank);
- isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+ isr = readl_relaxed(isr_reg) & enabled;
if (bank->level_mask)
level_mask = bank->level_mask & enabled;
+ else
+ level_mask = 0;
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt that occurs while
executing them */
- omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+ if (isr & ~level_mask)
+ omap_clear_gpio_irqbank(bank, isr & ~level_mask);
raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
/*---------------------------------------------------------------------*/
-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
{
static bool called;
u32 rev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 4d21135..eb4528c 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (pin <= 255) {
char ev_name[5];
- sprintf(ev_name, "_%c%02X",
+ sprintf(ev_name, "_%c%02hhX",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
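The %02hhX change matters because a printf width like 02 is a minimum, not a maximum: a value above 0xff prints three or more digits and overruns the 5-byte ev_name buffer, while the hh modifier converts the argument as unsigned char. A small illustration (the out-of-range pin value is contrived):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pin = 0x1ff;	/* contrived out-of-range value */
		char buf[5];

		/* "%02X" is a minimum width: large values print more digits. */
		printf("_E%02X\n", pin);	/* _E1FF: 5 chars + NUL, too big */

		/* "%02hhX" converts the argument as unsigned char: 2 digits max. */
		printf("_E%02hhX\n", pin);	/* _EFF */

		/* With the hh modifier the result always fits the 5-byte buffer. */
		snprintf(buf, sizeof(buf), "_E%02hhX", pin);
		puts(buf);
		return 0;
	}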
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7ef6c28..bc74613 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4e53aae..0028591 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2960,6 +2960,7 @@ int drm_atomic_helper_resume(struct drm_device *dev,
drm_modeset_backoff(&ctx);
}
+ drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19404c9..af289d3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3013,10 +3013,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ unsigned long flags;
+
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO);
- i915_gem_request_submit(request);
+
+ spin_lock_irqsave(&request->engine->timeline->lock, flags);
+ __i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
+ spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
static void engine_set_wedged(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 00c6aee..5d4cd3d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
{
enum port port;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
if (!dev_priv->vbt.child_dev_num)
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index ff9ecd2..b8315bc 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,7 +74,7 @@
#define I9XX_CSC_COEFF_1_0 \
((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{
return !state->degamma_lut &&
!state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
}
mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy(state)) {
+ if (!crtc_state_is_legacy_gamma(state)) {
mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
return;
}
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
glk_load_degamma_lut(state);
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
return;
}
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
uint32_t i, lut_size;
uint32_t word0, word1;
- if (crtc_state_is_legacy(state)) {
+ if (crtc_state_is_legacy_gamma(state)) {
/* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
return 0;
/*
- * We also allow no degamma lut and a gamma lut at the legacy
+ * We also allow no degamma lut/ctm and a gamma lut at the legacy
* size (256 entries).
*/
- if (!crtc_state->degamma_lut &&
- crtc_state->gamma_lut &&
- crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+ if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64f7b51..5c7828c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_state *pipe_config;
- int htot = I915_READ(HTOTAL(cpu_transcoder));
- int hsync = I915_READ(HSYNC(cpu_transcoder));
- int vtot = I915_READ(VTOTAL(cpu_transcoder));
- int vsync = I915_READ(VSYNC(cpu_transcoder));
+ u32 htot, hsync, vtot, vsync;
enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
i9xx_crtc_clock_get(intel_crtc, pipe_config);
mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ cpu_transcoder = pipe_config->cpu_transcoder;
+ htot = I915_READ(HTOTAL(cpu_transcoder));
+ hsync = I915_READ(HSYNC(cpu_transcoder));
+ vtot = I915_READ(VTOTAL(cpu_transcoder));
+ vsync = I915_READ(VSYNC(cpu_transcoder));
+
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6413494..2031986 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp);
+ intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* seems sufficient to avoid this problem.
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
vbt.t11_t12);
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b3a087c..49577eb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{
enum i915_power_well_id id = power_well->id;
bool wait_fuses = power_well->hsw.has_fuses;
- enum skl_power_gate pg;
+ enum skl_power_gate uninitialized_var(pg);
u32 val;
if (wait_fuses) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index dbb31a0..deaf869 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -248,7 +248,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
put_clk:
clk_put(ahb_clk);
put_gdsc:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index c2bdad8..824067d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.caps = MDP_LM_CAP_WB },
},
.nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
},
.dspp = {
.count = 3,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6fcb58a..4409776 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
- pm_runtime_put_autosuspend(&pdev->dev);
-
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f15821a0..ea5bb0e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence;
int i, ret;
- if (!exclusive) {
- /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
- * which makes this a slightly strange place to call it. OTOH this
- * is a convenient can-fail point to hook it in. (And similar to
- * how etnaviv and nouveau handle this.)
- */
- ret = reservation_object_reserve_shared(msm_obj->resv);
- if (ret)
- return ret;
- }
-
fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
}
vaddr = msm_gem_get_vaddr(obj);
- if (!vaddr) {
+ if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace);
drm_gem_object_unreference(obj);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(vaddr);
}
if (bo)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5d0a75d..93535ca 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -221,7 +221,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
return ret;
}
-static int submit_fence_sync(struct msm_gem_submit *submit)
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
int i, ret = 0;
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+ if (!write) {
+ /* NOTE: _reserve_shared() must happen before
+ * _add_shared_fence(), which makes this a slightly
+ * strange place to call it. OTOH this is a
+ * convenient can-fail point to hook it in.
+ */
+ ret = reservation_object_reserve_shared(msm_obj->resv);
+ if (ret)
+ return ret;
+ }
+
+ if (no_implicit)
+ continue;
+
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
if (ret)
break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
- ret = submit_fence_sync(submit);
- if (ret)
- goto out;
- }
+ ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+ if (ret)
+ goto out;
ret = submit_pin_objects(submit);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ffbff27..6a88703 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->aspace) {
+
+ if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0);
msm_gem_address_space_put(gpu->aspace);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0366b80..ec56794 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_SPACE_TO_END() does not access the tail more
+ * than once.
+ */
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
- fifo->head = (fifo->head + n) & (BUF_SZ - 1);
+ smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n;
ptr += n;
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
if (ret)
goto out;
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_CNT_TO_END() does not access the head more than
+ * once.
+ */
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT;
goto out;
}
- fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
+ smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n;
wake_up_all(&rd->fifo_event);
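The smp_store_release() conversions above pair the index update with the data accesses it publishes, following the circ_buf pattern of one producer updating head and one consumer updating tail. A user-space analogue with C11 atomics — the buffer size and payload are hypothetical, not the rd debugfs format, and the full-buffer check is omitted for brevity:

	#include <stdatomic.h>
	#include <stdio.h>

	#define BUF_SZ 16			/* power of two, as in the driver */

	static char buf[BUF_SZ];
	static atomic_uint head, tail;		/* producer owns head, consumer tail */

	static void produce(char c)
	{
		unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);

		buf[h] = c;
		/* Release: the data store above becomes visible before the index. */
		atomic_store_explicit(&head, (h + 1) & (BUF_SZ - 1),
				      memory_order_release);
	}

	static int consume(char *c)
	{
		unsigned int t = atomic_load_explicit(&tail, memory_order_relaxed);
		/* Acquire: pairs with the producer's release store to head. */
		unsigned int h = atomic_load_explicit(&head, memory_order_acquire);

		if (t == h)
			return 0;		/* empty */
		*c = buf[t];
		atomic_store_explicit(&tail, (t + 1) & (BUF_SZ - 1),
				      memory_order_release);
		return 1;
	}

	int main(void)
	{
		char c;

		produce('r');
		produce('d');
		while (consume(&c))
			putchar(c);
		putchar('\n');
		return 0;
	}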
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6a573d2..658fa2d 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
return -EINVAL;
}
+ /*
+ * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
+ * i.MX53 channel arbitration locking doesn't seem to work properly.
+ * Allow enabling the lock feature on IPUv3H / i.MX6 only.
+ */
+ if (bursts && ipu->ipu_type != IPUV3H)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
if (channel->num == idmac_lock_en_info[i].chnum)
break;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c35f74c..c860a79 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -73,6 +73,14 @@
#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
+#define IPU_PRE_STORE_ENG_STATUS 0x120
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
+#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
+#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
+
#define IPU_PRE_STORE_ENG_SIZE 0x130
#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
dma_addr_t buffer_paddr;
void *buffer_virt;
bool in_use;
+ unsigned int safe_window_end;
};
static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
u32 active_bpp = info->cpp[0] >> 1;
u32 val;
+ /* calculate safe window for ctrl register updates */
+ pre->safe_window_end = height - 2;
+
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(5);
+ unsigned short current_yblock;
+ u32 val;
+
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
+ return;
+ }
+
+ val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
+ current_yblock =
+ (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
+ IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
+ } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
+
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
}
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index ecc9ea4..0013ca9 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -14,6 +14,7 @@
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+ /* wait for both double buffers to be filled */
+ readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
+ (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
+ (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
+ 5, 1000);
+
clk_disable_unprepare(prg->clk_ipg);
chan->enabled = true;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0a3117c..374301f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -281,6 +281,7 @@
Support for ELECOM devices:
- BM084 Bluetooth Mouse
- DEFT Trackball (Wired and wireless)
+ - HUGE Trackball (Wired and wireless)
config HID_ELO
tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9bc9116..330ca98 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e2c7465..54aeea5 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
*/
/*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
- /* The DEFT trackball has eight buttons, but its descriptor only
- * reports five, disabling the three Fn buttons on the top of
- * the mouse.
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
+ case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
+ /* The DEFT/HUGE trackball has eight buttons, but its descriptor
+ * only reports five, disabling the three Fn buttons on the top
+ * of the mouse.
*
* Apply the following diff to the descriptor:
*
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* End Collection, End Collection,
*/
if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+ hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
rdesc[13] = 8; /* Button/Variable Report Count */
rdesc[21] = 8; /* Button/Variable Usage Maximum */
rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index a989191..be2e005 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -368,6 +368,8 @@
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
+#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 089bad8..045b5da 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
unsigned int rsize = 0;
char *rdesc;
int ret, n;
+ int num_descriptors;
+ size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
+ if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+ dbg_hid("hid descriptor is too short\n");
+ return -EINVAL;
+ }
+
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- for (n = 0; n < hdesc->bNumDescriptors; n++)
+ num_descriptors = min_t(int, hdesc->bNumDescriptors,
+ (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+ for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
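The clamp above stops trusting the device-supplied bNumDescriptors and derives the count that actually fits in the bLength bytes remaining after the fixed header (the patch first rejects descriptors shorter than the header itself). A standalone sketch of the same arithmetic with simplified, packed stand-ins for the HID structures:

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified, packed stand-ins; the real layouts live in linux/hid.h. */
	struct class_desc {
		unsigned char  bDescriptorType;
		unsigned short wDescriptorLength;
	} __attribute__((packed));

	struct hid_desc {
		unsigned char bLength;
		unsigned char bNumDescriptors;
		struct class_desc desc[1];	/* variable-length tail */
	} __attribute__((packed));

	static int usable_descriptors(const struct hid_desc *h)
	{
		size_t offset = offsetof(struct hid_desc, desc);
		int fits = (h->bLength - offset) / sizeof(struct class_desc);

		/* Never trust the device: clamp the advertised count to
		 * what the declared length can actually hold. */
		return h->bNumDescriptors < fits ? h->bNumDescriptors : fits;
	}

	int main(void)
	{
		/* A hostile device claiming 200 descriptors in an 8-byte header. */
		struct hid_desc h = { .bLength = 8, .bNumDescriptors = 200 };

		printf("%d\n", usable_descriptors(&h));	/* 2 */
		return 0;
	}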
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 51f8215..8e8874d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
- swiotlb = iommu_pass_through ? 1 : 0;
+ swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
/*
* In case we don't initialize SWIOTLB (actually the common case
- * when AMD IOMMU is enabled), make sure there are global
- * dma_ops set as a fall-back for devices not handled by this
- * driver (for example non-PCI devices).
+ * when AMD IOMMU is enabled and SME is not active), make sure there
+ * are global dma_ops set as a fall-back for devices not handled by
+ * this driver (for example non-PCI devices). When SME is active,
+ * make sure that the swiotlb variable remains set so the global dma_ops
+ * continue to be SWIOTLB.
*/
if (!swiotlb)
dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
mutex_unlock(&domain->api_lock);
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
return unmap_size;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index f596fcc..25c2c75 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id sysmmu_of_match[] __initconst = {
+static const struct of_device_id sysmmu_of_match[] = {
{ .compatible = "samsung,exynos-sysmmu", },
{ },
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 49b80da..805ab45 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
return true;
default:
bpf_warn_invalid_xdp_action(action);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
+ /* fall through */
case XDP_DROP:
/* Check if it's a recycled page; if not,
* unmap the DMA mapping.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 523f9d0..8a32eb7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
**/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
-#ifndef CONFIG_SPARC
- u32 regval;
- u32 i;
-#endif
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
- /* Disable relaxed ordering */
- for (i = 0; ((i < hw->mac.max_tx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
- }
-
- for (i = 0; ((i < hw->mac.max_rx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2c19070..6e6ab6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 72c5657..c3e7a81 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
- int i, err = 0;
+ int i, j, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
/* allocate temporary buffer to store rings in */
- i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
- i = max_t(int, i, adapter->num_xdp_queues);
+ i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+ adapter->num_rx_queues);
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- memcpy(&temp_ring[i], adapter->xdp_ring[i],
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ memcpy(&temp_ring[i], adapter->xdp_ring[j],
sizeof(struct ixgbe_ring));
temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ ixgbe_free_tx_resources(adapter->xdp_ring[j]);
- memcpy(adapter->xdp_ring[i], &temp_ring[i],
+ memcpy(adapter->xdp_ring[j], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d962368..4d76afd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
return;
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+ vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return ixgbe_ptp_set_ts_config(adapter, req);
case SIOCGHWTSTAMP:
return ixgbe_ptp_get_ts_config(adapter, req);
+ case SIOCGMIIPHY:
+ if (!adapter->hw.phy.ops.read_reg)
+ return -EOPNOTSUPP;
+ /* fall through */
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
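The VXLANCTRL fix earlier in this ixgbe diff swaps a logical AND for the intended bitwise AND: reg && ~mask collapses the register value to 0 or 1 instead of clearing the masked bits. A two-printf demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg = 0xffff, mask = 0x00f0;

		/* Logical AND: both operands nonzero, result is just 1. */
		printf("0x%x\n", reg && ~mask);		/* 0x1 */

		/* Bitwise AND: clears only the bits covered by the mask. */
		printf("0x%x\n", reg & ~mask);		/* 0xff0f */
		return 0;
	}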
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 032089e..c16718d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3505,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
- struct mlxsw_sp_lpm_tree *lpm_tree;
-
- /* Aggregate prefix lengths across all virtual routers to make
- * sure we only have used prefix lengths in the LPM tree.
- */
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- fib->proto);
- if (IS_ERR(lpm_tree))
- goto err_tree_get;
- mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
return;
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index c3f77e3..e365866 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
static int ppp_dev_init(struct net_device *dev)
{
+ struct ppp *ppp;
+
netdev_lockdep_set_classes(dev);
+
+ ppp = netdev_priv(dev);
+ /* Let the netdevice take a reference on the ppp file. This ensures
+ * that ppp_destroy_interface() won't run before the device gets
+ * unregistered.
+ */
+ atomic_inc(&ppp->file.refcnt);
+
return 0;
}
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
wake_up_interruptible(&ppp->file.rwait);
}
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ ppp = netdev_priv(dev);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->priv_destructor = ppp_dev_priv_destructor;
netif_keep_dst(dev);
}
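The ppp change gives the netdevice its own reference on the ppp file in ndo_init and drops it in priv_destructor, so whichever of the two teardown paths finishes last performs the free. A minimal user-space sketch of that shared-ownership pattern using C11 atomics (the names are illustrative, not the ppp API):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct pfile {
		atomic_int refcnt;
	};

	static void pfile_get(struct pfile *f)
	{
		atomic_fetch_add(&f->refcnt, 1);
	}

	static void pfile_put(struct pfile *f)
	{
		/* Mirrors atomic_dec_and_test(): the last owner out frees. */
		if (atomic_fetch_sub(&f->refcnt, 1) == 1) {
			printf("destroying interface\n");
			free(f);
		}
	}

	int main(void)
	{
		struct pfile *f = malloc(sizeof(*f));

		atomic_init(&f->refcnt, 1);	/* creator's reference */
		pfile_get(f);			/* netdevice's own (ndo_init) */

		pfile_put(f);			/* file release: struct survives */
		pfile_put(f);			/* priv_destructor drops the last ref */
		return 0;
	}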
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 29c7e2e..52ea80b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -560,6 +560,7 @@ static const struct driver_info wwan_info = {
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
+#define UBLOX_VENDOR_ID 0x1546
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -869,6 +870,18 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&zte_cdc_info,
}, {
+ /* U-blox TOBY-L2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
+ /* U-blox SARA-U2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 260d33c..6389753 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
{
if (!dn || dn != of_stdout || console_set_on_cmdline)
return false;
- return !add_preferred_console(name, index,
- kstrdup(of_stdout_options, GFP_KERNEL));
+
+ /*
+ * XXX: cast `options' to char pointer to suppress compilation
+ * warnings: printk, UART and console drivers expect char pointer.
+ */
+ return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c35..32771c2 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,7 +25,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#define MAX_RESERVED_REGIONS 16
+#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fbb7211..264c355 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
struct device_node *np;
/* Get the parent of the port */
- np = of_get_next_parent(to_of_node(fwnode));
+ np = of_get_parent(to_of_node(fwnode));
if (!np)
return NULL;
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 89f4e3d..26ed0c0 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
bridge->sysdata = pcie;
bridge->busnr = 0;
bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0) {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 9c40da5..1987fec 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,6 +233,7 @@ struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
+ unsigned long pages;
struct mutex lock;
u64 phys;
int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
goto err;
}
- /*
- * The PCI host bridge on Tegra contains some logic that intercepts
- * MSI writes, which means that the MSI target address doesn't have
- * to point to actual physical memory. Rather than allocating one 4
- * KiB page of system memory that's never used, we can simply pick
- * an arbitrary address within an area reserved for system memory
- * in the FPCI address map.
- *
- * However, in order to avoid confusion, we pick an address that
- * doesn't map to physical memory. The FPCI address map reserves a
- * 1012 GiB region for system memory and memory-mapped I/O. Since
- * none of the Tegra SoCs that contain this PCI host bridge can
- * address more than 16 GiB of system memory, the last 4 KiB of
- * these 1012 GiB is a good candidate.
- */
- msi->phys = 0xfcfffff000;
+ /* setup AFI/FPCI range */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ msi->phys = virt_to_phys((void *)msi->pages);
afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+ free_pages(msi->pages, 0);
+
if (msi->irq > 0)
free_irq(msi->irq, pcie);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1778cf4..82cd8b0 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -100,6 +100,7 @@
tristate "AMD GPIO pin control"
depends on GPIOLIB
select GPIOLIB_IRQCHIP
+ select PINMUX
select PINCONF
select GENERIC_PINCONF
help
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0944310..ff78244 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
unsigned long events;
unsigned offset;
unsigned gpio;
- unsigned int type;
events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
events &= mask;
events &= pc->enabled_irq_map[bank];
for_each_set_bit(offset, &events, 32) {
gpio = (32 * bank) + offset;
- /* FIXME: no clue why the code looks up the type here */
- type = pc->irq_type[gpio];
-
generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
gpio));
}
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 04e929f..fadbca9 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
struct gpio_chip *chip = &pctrl->chip;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
+ int irq_base;
*chip = chv_gpio_chip;
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
- ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+ if (!need_valid_mask) {
+ irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+ chip->ngpio, NUMA_NO_NODE);
+ if (irq_base < 0) {
+ dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ return irq_base;
+ }
+ } else {
+ irq_base = 0;
+ }
+
+ ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d0e5d6e..e2c1988 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
if (*str == '=')
str++;
- if (!strncmp(str, "cec_disable", 7))
+ if (!strcmp(str, "cec_disable"))
ce_arr.disabled = 1;
else
return 0;
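The cec fix above is the classic strncmp() prefix pitfall: comparing only 7 bytes of "cec_disable" accepts any string that begins with "cec_dis". A short demonstration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *arg = "cec_disxxx";	/* shares only the 7-byte prefix */

		/* Only "cec_dis" takes part in the comparison: false match. */
		printf("%d\n", strncmp(arg, "cec_disable", 7) == 0);	/* 1 */

		/* Whole-token comparison rejects it. */
		printf("%d\n", strcmp(arg, "cec_disable") == 0);	/* 0 */
		return 0;
	}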
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index df63e44..bf04479 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -109,6 +109,7 @@
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
select MFD_SYSCON
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -120,6 +121,7 @@
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SMEM
select QCOM_MDT_LOADER
select QCOM_RPROC_COMMON
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 612d914..633268e 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
if (!(att->flags & ATT_OWN))
continue;
- if (b > IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX7D_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
att->sa, att->size);
- if (IS_ERR(priv->mem[b].cpu_addr)) {
+ if (!priv->mem[b].cpu_addr) {
dev_err(dev, "devm_ioremap_resource failed\n");
- err = PTR_ERR(priv->mem[b].cpu_addr);
- return err;
+ return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
return err;
}
- if (b > IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX7D_RPROC_MEM_MAX)
break;
priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
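The imx_rproc fix matches each check to its function's error convention: devm_ioremap() returns NULL on failure, while devm_ioremap_resource() returns an ERR_PTR()-encoded errno. A standalone sketch of the two conventions, with user-space stand-ins for the kernel's err.h helpers (the mapping functions here are fakes for illustration):

	#include <stdio.h>

	/* User-space stand-ins for the kernel's err.h helpers. */
	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	static void *map_null_style(int fail)	/* devm_ioremap() convention */
	{
		return fail ? NULL : (void *)0x1000;
	}

	static void *map_errptr_style(int fail)	/* _resource() convention */
	{
		return fail ? ERR_PTR(-12) : (void *)0x1000;	/* -ENOMEM */
	}

	int main(void)
	{
		void *a = map_null_style(1);
		void *b = map_errptr_style(1);

		if (!a)			/* right check for a NULL-returning API */
			puts("NULL-style failure caught");
		if (IS_ERR(b))		/* right check for an ERR_PTR API */
			printf("ERR_PTR failure: %ld\n", PTR_ERR(b));

		/* The bug being fixed: IS_ERR() on a NULL return is false,
		 * so a failed mapping would be used as a valid pointer. */
		printf("IS_ERR(NULL) = %d\n", IS_ERR(a));	/* 0 */
		return 0;
	}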
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5a5e927..5dcc9bf 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
unsigned long flags;
intent = kzalloc(sizeof(*intent), GFP_KERNEL);
-
if (!intent)
return NULL;
intent->data = kzalloc(size, GFP_KERNEL);
if (!intent->data)
- return NULL;
+ goto free_intent;
spin_lock_irqsave(&channel->intent_lock, flags);
ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&channel->intent_lock, flags);
- return NULL;
+ goto free_data;
}
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
intent->reuse = reuseable;
return intent;
+
+free_data:
+ kfree(intent->data);
+free_intent:
+ kfree(intent);
+ return NULL;
}
static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (ret)
- return ret;
+ goto unlock;
ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
ret = channel->intent_req_result ? 0 : -ECANCELED;
}
+unlock:
mutex_unlock(&channel->intent_req_lock);
return ret;
}
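The qcom_glink_alloc_intent() fix above converts early returns into the usual kernel goto-unwind ladder, so each failure point releases exactly what was allocated before it. A minimal standalone rendering of the idiom (the fail flag stands in for idr_alloc_cyclic() failing after both allocations):

	#include <stdio.h>
	#include <stdlib.h>

	struct intent {
		void *data;
	};

	static struct intent *alloc_intent(size_t size, int fail)
	{
		struct intent *intent = malloc(sizeof(*intent));

		if (!intent)
			return NULL;

		intent->data = malloc(size);
		if (!intent->data)
			goto free_intent;

		if (fail)			/* id allocation failed */
			goto free_data;

		return intent;

	free_data:
		free(intent->data);
	free_intent:
		free(intent);
		return NULL;
	}

	int main(void)
	{
		struct intent *ok = alloc_intent(64, 0);

		printf("ok=%p failing=%p\n",
		       (void *)ok, (void *)alloc_intent(64, 1));
		free(ok->data);
		free(ok);
		return 0;
	}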
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2fe216b..84a8ac2 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
- if (!WARN_ON(disc == N_TTY)) {
- tty_ldisc_put(tty->ldisc);
- tty->ldisc = NULL;
- }
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
}
return retval;
}
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
if (tty->ldisc) {
if (reinit) {
- if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
- tty_ldisc_reinit(tty, N_TTY);
+ if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
+ tty_ldisc_reinit(tty, N_TTY) < 0)
+ WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
} else
tty_ldisc_kill(tty);
}
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adaf6f6..e1cbdfd 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
- if (unlikely(copied < len && !PageUptodate(page))) {
- copied = 0;
- goto out;
+ if (!PageUptodate(page)) {
+ if (unlikely(copied < len)) {
+ copied = 0;
+ goto out;
+ } else if (len == PAGE_SIZE) {
+ SetPageUptodate(page);
+ }
}
/*
* No need to use i_size_read() here, the i_size
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 2a46762..a7c5a98 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -596,7 +596,7 @@ static void bm_evict_inode(struct inode *inode)
{
Node *e = inode->i_private;
- if (e->flags & MISC_FMT_OPEN_FILE)
+ if (e && e->flags & MISC_FMT_OPEN_FILE)
filp_close(e->interp_file, NULL);
clear_inode(inode);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 93d088f..789f55e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
set_page_writeback(page);
result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
- if (result)
+ if (result) {
end_page_writeback(page);
- else
+ } else {
+ clean_page_buffers(page);
unlock_page(page);
+ }
blk_queue_exit(bdev->bd_queue);
return result;
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812..96415c6 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -866,7 +866,8 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
*/
if (sdio->boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- dio_bio_submit(dio, sdio);
+ if (sdio->bio)
+ dio_bio_submit(dio, sdio);
put_page(sdio->cur_page);
sdio->cur_page = NULL;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9a7c903..4b4a72f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
void stop_discard_thread(struct f2fs_sb_info *sbi);
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void release_discard_addrs(struct f2fs_sb_info *sbi);
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 621b9b3..c695ff4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
}
/* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
{
__issue_discard_cmd(sbi, false);
__drop_discard_cmd(sbi);
- __wait_discard_cmd(sbi, false);
+ __wait_discard_cmd(sbi, !umount);
}
static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
}
/* It's time to issue all the filed discards */
mark_discard_range_all(sbi);
- f2fs_wait_discard_bios(sbi);
+ f2fs_wait_discard_bios(sbi, false);
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
return err;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 89f61eb..933c3d5 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
}
/* be sure to wait for any on-going discard commands */
- f2fs_wait_discard_bios(sbi);
+ f2fs_wait_discard_bios(sbi, true);
if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
struct cp_control cpc = {
diff --git a/fs/mpage.c b/fs/mpage.c
index 37bb77c..c991fae 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
try_to_free_buffers(page);
}
+/*
+ * For situations where we want to clean all buffers attached to a page.
+ * We don't need to calculate how many buffers are attached to the page;
+ * we just need to specify a number larger than the maximum number of buffers.
+ */
+void clean_page_buffers(struct page *page)
+{
+ clean_buffers(page, ~0U);
+}
+
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
void *data)
{
@@ -605,10 +615,8 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
if (bio == NULL) {
if (first_unmapped == blocks_per_page) {
if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
- page, wbc)) {
- clean_buffers(page, first_unmapped);
+ page, wbc))
goto out;
- }
}
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index efebe6c..22880ef 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
static void pnfs_init_server(struct nfs_server *server)
{
rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
- rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
}
#else
@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void)
ida_init(&server->openowner_id);
ida_init(&server->lockowner_id);
pnfs_init_server(server);
+ rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
return server;
}
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44c638b..508126e 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
dprintk("--> %s\n", __func__);
- nfs4_fl_put_deviceid(fl->dsaddr);
+ if (fl->dsaddr != NULL)
+ nfs4_fl_put_deviceid(fl->dsaddr);
/* This assumes a single RW lseg */
if (lseg->pls_range.iomode == IOMODE_RW) {
struct nfs4_filelayout *flo;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index dd5d27d..30426c1 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
ssize_t ret;
ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
- if (ret <= 0)
+ if (ret < 0)
return ERR_PTR(ret);
rkey = request_key(&key_type_id_resolver, desc, "");
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6c61e2b..f90090e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
lo = NFS_I(inode)->layout;
/* If the open stateid was bad, then recover it. */
if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
- nfs4_stateid_match_other(&lgp->args.stateid,
- &lgp->args.ctx->state->stateid)) {
+ !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
spin_unlock(&inode->i_lock);
exception->state = lgp->args.ctx->state;
exception->stateid = &lgp->args.stateid;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 37c8af0..14ed979 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr,
* Assumes OPEN is the biggest non-idempotent compound.
* 2 is the verifier.
*/
- max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
- RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
+ max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2)
+ * XDR_UNIT + RPC_MAX_AUTH_SIZE;
encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
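
Editor's note: the fix stops scaling the auth blob by XDR_UNIT: RPC_MAX_AUTH_SIZE is already in bytes, while the other terms are XDR words. A toy computation of the over-estimate; the open/header sizes below are made-up stand-ins, only XDR_UNIT (4) and RPC_MAX_AUTH_SIZE (400) match current kernels:

#include <stdio.h>

#define XDR_UNIT          4
#define RPC_MAX_AUTH_SIZE 400
#define NFS4_dec_open_sz  300	/* hypothetical, in XDR words */
#define RPC_REPHDRSIZE    10	/* hypothetical, in XDR words */

int main(void)
{
	unsigned old_sz = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
			   RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
	unsigned new_sz = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2) * XDR_UNIT +
			  RPC_MAX_AUTH_SIZE;

	/* old counts the byte-sized auth blob as XDR words, inflating the
	 * cached reply size by (XDR_UNIT - 1) * RPC_MAX_AUTH_SIZE bytes */
	printf("old=%u new=%u overestimate=%u\n", old_sz, new_sz, old_sz - new_sz);
	return 0;
}
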
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c69db7..8487486 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
exp_put(u->secinfo.si_exp);
}
+static void
+nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
+{
+ if (u->secinfo_no_name.sin_exp)
+ exp_put(u->secinfo_no_name.sin_exp);
+}
+
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_SECINFO_NO_NAME] = {
.op_func = nfsd4_secinfo_no_name,
- .op_release = nfsd4_secinfo_release,
+ .op_release = nfsd4_secinfo_no_name_release,
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
.op_rsize_bop = nfsd4_secinfo_rsize,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 50b0556..52ad151 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
spin_lock(&dquot->dq_dqb_lock);
if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
- goto add;
+ goto finish;
tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ space + rsv_space;
- if (flags & DQUOT_SPACE_NOFAIL)
- goto add;
-
if (dquot->dq_dqb.dqb_bhardlimit &&
tspace > dquot->dq_dqb.dqb_bhardlimit &&
!ignore_hardlimit(dquot)) {
if (flags & DQUOT_SPACE_WARN)
prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
ret = -EDQUOT;
- goto out;
+ goto finish;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
if (flags & DQUOT_SPACE_WARN)
prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
ret = -EDQUOT;
- goto out;
+ goto finish;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
* be always printed
*/
ret = -EDQUOT;
- goto out;
+ goto finish;
}
}
-add:
- dquot->dq_dqb.dqb_rsvspace += rsv_space;
- dquot->dq_dqb.dqb_curspace += space;
-out:
+finish:
+ /*
+ * We have to be careful and go through warning generation & grace time
+ * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
+ * only here...
+ */
+ if (flags & DQUOT_SPACE_NOFAIL)
+ ret = 0;
+ if (!ret) {
+ dquot->dq_dqb.dqb_rsvspace += rsv_space;
+ dquot->dq_dqb.dqb_curspace += space;
+ }
spin_unlock(&dquot->dq_dqb_lock);
return ret;
}
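
Editor's note: the restructuring funnels every exit through one label so warning generation and grace-time handling still run even when DQUOT_SPACE_NOFAIL will force success. The control-flow idiom in isolation; names and errno value are illustrative:

#include <stdio.h>

#define SPACE_NOFAIL 0x1

static int add_space(unsigned flags, long cur, long limit, long req)
{
	int ret = 0;

	if (cur + req > limit) {
		/* side effects (warnings, grace time) happen unconditionally */
		fprintf(stderr, "over limit, warning issued\n");
		ret = -122;		/* -EDQUOT */
	}
	/* only the final outcome is overridden for the nofail case */
	if (flags & SPACE_NOFAIL)
		ret = 0;
	if (!ret)
		cur += req;		/* commit the usage; a real version writes it back */
	return ret;
}

int main(void)
{
	printf("%d\n", add_space(0, 90, 100, 20));		/* -122 */
	printf("%d\n", add_space(SPACE_NOFAIL, 90, 100, 20));	/* 0, but warned */
	return 0;
}
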
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 744dcae..f965ce8 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
bp = xfs_btree_get_bufs(args->mp, args->tp,
args->agno, fbno, 0);
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
xfs_trans_binval(args->tp, bp);
}
args->len = 1;
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto out_agbp_relse;
+ }
xfs_trans_binval(tp, bp);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 044a363..def32fa1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1477,14 +1477,14 @@ xfs_bmap_isaeof(
int is_empty;
int error;
- bma->aeof = 0;
+ bma->aeof = false;
error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
&is_empty);
if (error)
return error;
if (is_empty) {
- bma->aeof = 1;
+ bma->aeof = true;
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 988bb3f..dfd6439 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1962,7 +1962,7 @@ xfs_difree_inobt(
if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
rec.ir_free == XFS_INOBT_ALL_FREE &&
mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
- xic->deleted = 1;
+ xic->deleted = true;
xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
@@ -1989,7 +1989,7 @@ xfs_difree_inobt(
xfs_difree_inode_chunk(mp, agno, &rec, dfops);
} else {
- xic->deleted = 0;
+ xic->deleted = false;
error = xfs_inobt_update(cur, &rec);
if (error) {
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8372e9b..71de185 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
uint32_t ilf_fields; /* flags for fields logged */
uint16_t ilf_asize; /* size of attr d/ext/root */
uint16_t ilf_dsize; /* size of data/ext/root */
+ uint32_t ilf_pad; /* pad for 64 bit boundary */
uint64_t ilf_ino; /* inode number */
union {
uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -280,7 +281,12 @@ typedef struct xfs_inode_log_format {
int32_t ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_t;
-typedef struct xfs_inode_log_format_32 {
+/*
+ * Old 32 bit systems will log in this format without the 64 bit
+ * alignment padding. Recovery will detect this and convert it to the
+ * correct format.
+ */
+struct xfs_inode_log_format_32 {
uint16_t ilf_type; /* inode log item type */
uint16_t ilf_size; /* size of this item */
uint32_t ilf_fields; /* flags for fields logged */
@@ -294,24 +300,7 @@ typedef struct xfs_inode_log_format_32 {
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
int32_t ilf_boffset; /* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
- uint16_t ilf_type; /* inode log item type */
- uint16_t ilf_size; /* size of this item */
- uint32_t ilf_fields; /* flags for fields logged */
- uint16_t ilf_asize; /* size of attr d/ext/root */
- uint16_t ilf_dsize; /* size of data/ext/root */
- uint32_t ilf_pad; /* pad for 64 bit boundary */
- uint64_t ilf_ino; /* inode number */
- union {
- uint32_t ilfu_rdev; /* rdev value for dev inode*/
- uuid_t ilfu_uuid; /* mount point value */
- } ilf_u;
- int64_t ilf_blkno; /* blkno of inode buffer */
- int32_t ilf_len; /* len of inode buffer */
- int32_t ilf_boffset; /* off of inode in buffer */
-} xfs_inode_log_format_64_t;
+} __attribute__((packed));
/*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 7034e17..3354140 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
int
xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ umode_t mode;
+ bool set_mode = false;
int error = 0;
if (!acl)
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
if (type == ACL_TYPE_ACCESS) {
- umode_t mode;
-
error = posix_acl_update_mode(inode, &mode, &acl);
if (error)
return error;
- error = xfs_set_mode(inode, mode);
- if (error)
- return error;
+ set_mode = true;
}
set_acl:
- return __xfs_set_acl(inode, acl, type);
+ error = __xfs_set_acl(inode, acl, type);
+ if (error)
+ return error;
+
+ /*
+ * We set the mode after successfully updating the ACL xattr because the
+ * xattr update can fail at ENOSPC and we don't want to change the mode
+ * if the ACL update hasn't been applied.
+ */
+ if (set_mode)
+ error = xfs_set_mode(inode, mode);
+
+ return error;
}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index ebd66b1..e3a950e 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
&bp, XFS_ATTR_FORK);
if (error)
return error;
+ node = bp->b_addr;
+ btree = dp->d_ops->node_tree_p(node);
child_fsb = be32_to_cpu(btree[i + 1].before);
xfs_trans_brelse(*trans, bp);
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index e9db7fc..6503cfa 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@ xfs_zero_extent(
GFP_NOFS, 0);
}
+#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
}
return 0;
}
+#endif /* CONFIG_XFS_RT */
/*
* Check if the endoff is outside the last extent. If so the caller will grow
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 0eaa81d..7d330b3c 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@ struct xfs_mount;
struct xfs_trans;
struct xfs_bmalloca;
+#ifdef CONFIG_XFS_RT
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+#else /* !CONFIG_XFS_RT */
+/*
+ * Attempts to allocate RT extents when RT is disabled indicate corruption and
+ * should trigger a shutdown.
+ */
+static inline int
+xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
+{
+ return -EFSCORRUPTED;
+}
+#endif /* CONFIG_XFS_RT */
+
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
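
Editor's note: providing a stub that returns a hard error when the feature is compiled out is a common kernel idiom: callers keep compiling, and any path that reaches the stub signals corruption rather than a link failure. A generic sketch of the same pattern; CONFIG_FOO and all names are illustrative:

#include <stdio.h>

#define EFSCORRUPTED 117	/* illustrative errno value */

#ifdef CONFIG_FOO
int foo_alloc(int len) { return len; }	/* real implementation */
#else
/* feature compiled out: reaching here means on-disk state is bogus */
static inline int foo_alloc(int len)
{
	(void)len;
	return -EFSCORRUPTED;
}
#endif

int main(void)
{
	printf("foo_alloc(4) = %d\n", foo_alloc(4));
	return 0;
}
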
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 309e26c..56d0e52 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -764,7 +764,7 @@ xfs_file_fallocate(
enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL;
loff_t new_size = 0;
- bool do_file_insert = 0;
+ bool do_file_insert = false;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
@@ -825,7 +825,7 @@ xfs_file_fallocate(
error = -EINVAL;
goto out_unlock;
}
- do_file_insert = 1;
+ do_file_insert = true;
} else {
flags |= XFS_PREALLOC_SET;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 814ed729..560e0b4 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -521,6 +521,7 @@ __xfs_getfsmap_rtdev(
return query_fn(tp, info);
}
+#ifdef CONFIG_XFS_RT
/* Actually query the realtime bitmap. */
STATIC int
xfs_getfsmap_rtdev_rtbitmap_query(
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
info);
}
+#endif /* CONFIG_XFS_RT */
/* Execute a getfsmap query against the regular data device. */
STATIC int
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
return false;
}
+/*
+ * There are only two devices if we didn't configure RT devices at build time.
+ */
+#ifdef CONFIG_XFS_RT
#define XFS_GETFSMAP_DEVS 3
+#else
+#define XFS_GETFSMAP_DEVS 2
+#endif /* CONFIG_XFS_RT */
+
/*
* Get filesystem's extents as described in head, and format for
* output. Calls formatter to fill the user's buffer until all
@@ -853,10 +863,12 @@ xfs_getfsmap(
handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
handlers[1].fn = xfs_getfsmap_logdev;
}
+#ifdef CONFIG_XFS_RT
if (mp->m_rtdev_targp) {
handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
}
+#endif /* CONFIG_XFS_RT */
xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
xfs_getfsmap_dev_compare);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index a705f34..9bbc2d7 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
to->di_dmstate = from->di_dmstate;
to->di_flags = from->di_flags;
+ /* log a dummy value to ensure log structure is fully initialised */
+ to->di_next_unlinked = NULLAGINO;
+
if (from->di_version == 3) {
to->di_changecount = inode->i_version;
to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
* the second with the on-disk inode structure, and a possible third and/or
* fourth with the inode data/extents/b-tree root and inode attributes
* data/extents/b-tree root.
+ *
+ * Note: Always use the 64 bit inode log format structure so we don't
+ * leave an uninitialised hole in the format item on 64 bit systems. Log
+ * recovery on 32 bit systems handles this just fine, so there's no reason
+ * for not using and initialising the properly padded structure all the time.
*/
STATIC void
xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
- struct xfs_inode_log_format *ilf;
struct xfs_log_iovec *vecp = NULL;
+ struct xfs_inode_log_format *ilf;
ASSERT(ip->i_d.di_version > 1);
@@ -425,7 +433,17 @@ xfs_inode_item_format(
ilf->ilf_boffset = ip->i_imap.im_boffset;
ilf->ilf_fields = XFS_ILOG_CORE;
ilf->ilf_size = 2; /* format + core */
- xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
+
+ /*
+ * make sure we don't leak uninitialised data into the log in the case
+ * when we don't log every field in the inode.
+ */
+ ilf->ilf_dsize = 0;
+ ilf->ilf_asize = 0;
+ ilf->ilf_pad = 0;
+ uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
+
+ xlog_finish_iovec(lv, vecp, sizeof(*ilf));
xfs_inode_item_format_core(ip, lv, &vecp);
xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -855,44 +873,29 @@ xfs_istale_done(
}
/*
- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
- * (which can have different field alignments) to the native version
+ * convert an xfs_inode_log_format struct from the old 32 bit version
+ * (which can have different field alignments) to the native 64 bit version
*/
int
xfs_inode_item_format_convert(
- xfs_log_iovec_t *buf,
- xfs_inode_log_format_t *in_f)
+ struct xfs_log_iovec *buf,
+ struct xfs_inode_log_format *in_f)
{
- if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
- xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
+ struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
- in_f->ilf_type = in_f32->ilf_type;
- in_f->ilf_size = in_f32->ilf_size;
- in_f->ilf_fields = in_f32->ilf_fields;
- in_f->ilf_asize = in_f32->ilf_asize;
- in_f->ilf_dsize = in_f32->ilf_dsize;
- in_f->ilf_ino = in_f32->ilf_ino;
- /* copy biggest field of ilf_u */
- uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
- in_f->ilf_blkno = in_f32->ilf_blkno;
- in_f->ilf_len = in_f32->ilf_len;
- in_f->ilf_boffset = in_f32->ilf_boffset;
- return 0;
- } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
- xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
+ if (buf->i_len != sizeof(*in_f32))
+ return -EFSCORRUPTED;
- in_f->ilf_type = in_f64->ilf_type;
- in_f->ilf_size = in_f64->ilf_size;
- in_f->ilf_fields = in_f64->ilf_fields;
- in_f->ilf_asize = in_f64->ilf_asize;
- in_f->ilf_dsize = in_f64->ilf_dsize;
- in_f->ilf_ino = in_f64->ilf_ino;
- /* copy biggest field of ilf_u */
- uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
- in_f->ilf_blkno = in_f64->ilf_blkno;
- in_f->ilf_len = in_f64->ilf_len;
- in_f->ilf_boffset = in_f64->ilf_boffset;
- return 0;
- }
- return -EFSCORRUPTED;
+ in_f->ilf_type = in_f32->ilf_type;
+ in_f->ilf_size = in_f32->ilf_size;
+ in_f->ilf_fields = in_f32->ilf_fields;
+ in_f->ilf_asize = in_f32->ilf_asize;
+ in_f->ilf_dsize = in_f32->ilf_dsize;
+ in_f->ilf_ino = in_f32->ilf_ino;
+ /* copy biggest field of ilf_u */
+ uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
+ in_f->ilf_blkno = in_f32->ilf_blkno;
+ in_f->ilf_len = in_f32->ilf_len;
+ in_f->ilf_boffset = in_f32->ilf_boffset;
+ return 0;
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c5107c7..dc95a49 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2515,7 +2515,7 @@ xlog_write(
if (lv)
vecp = lv->lv_iovecp;
}
- if (record_cnt == 0 && ordered == false) {
+ if (record_cnt == 0 && !ordered) {
if (!lv)
return 0;
break;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ea7d4b4..e9727d0 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -704,7 +704,7 @@ xfs_mountfs(
xfs_set_maxicount(mp);
/* enable fail_at_unmount as default */
- mp->m_fail_unmount = 1;
+ mp->m_fail_unmount = true;
error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
if (error)
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d7..0492436 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28);
XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8);
XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52);
- XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
}
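
Editor's note: the size checks above pin down why ilf_pad matters: without it, a 64-bit compiler inserts the same four bytes implicitly -- uninitialised -- before the 8-byte ilf_ino. A compile-time demonstration of the prefix layout, using a cut-down mirror of the real struct:

#include <stdint.h>

struct prefix_padded {		/* mirrors xfs_inode_log_format's head */
	uint16_t ilf_type;
	uint16_t ilf_size;
	uint32_t ilf_fields;
	uint16_t ilf_asize;
	uint16_t ilf_dsize;
	uint32_t ilf_pad;	/* explicit: keeps ilf_ino 8-byte aligned */
	uint64_t ilf_ino;
};

struct prefix_packed {		/* mirrors the old 32-bit on-disk layout */
	uint16_t ilf_type;
	uint16_t ilf_size;
	uint32_t ilf_fields;
	uint16_t ilf_asize;
	uint16_t ilf_dsize;
	uint64_t ilf_ino;
} __attribute__((packed));

/* dropping ilf_pad without packing would still give 24 bytes on 64-bit,
 * but with 4 bytes of implicit, uninitialised padding leaking into the log */
_Static_assert(sizeof(struct prefix_padded) == 24, "padded prefix");
_Static_assert(sizeof(struct prefix_packed) == 20, "packed prefix");

int main(void) { return 0; }
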
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8390859..f1af7d6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
u32 key)
{
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555..446b24c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0ad4c30..91189bb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,12 @@
#define STACK_MAGIC 0xdeadbeef
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
/* @a is a power of 2 value */
@@ -57,6 +63,10 @@
#define READ 0
#define WRITE 1
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#define u64_to_user_ptr(x) ( \
@@ -76,7 +86,15 @@
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
+/**
+ * FIELD_SIZEOF - get the size of a struct's field
+ * @t: the target struct
+ * @f: the target struct's field
+ * Return: the size of @f in the struct definition without having a
+ * declared instance of @t.
+ */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \
@@ -107,7 +125,7 @@
/*
* Divide positive or negative dividend by positive or negative divisor
* and round to closest integer. Result is undefined for negative
- * divisors if he dividend variable type is unsigned and for negative
+ * divisors if the dividend variable type is unsigned and for negative
* dividends if the divisor variable type is unsigned.
*/
#define DIV_ROUND_CLOSEST(x, divisor)( \
@@ -247,13 +265,13 @@ extern int _cond_resched(void);
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g. for accessing an index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
+ * @ep_ro elements, for example. Think of it as sort of modulus, only that
* the result isn't that of modulo. ;) Note that if initial input is a
* small value, then result will return 0.
*
- * Return: a result based on val in interval [0, ep_ro).
+ * Return: a result based on @val in interval [0, @ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
@@ -618,8 +636,8 @@ do { \
* trace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
- * Note: __trace_printk is an internal function for trace_printk and
- * the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
@@ -629,7 +647,7 @@ do { \
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
*
* A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
* where the processing of the print format is still too much.
*
* This function allows a kernel developer to debug fast path sections
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
t2 min2 = (y); \
(void) (&min1 == &min2); \
min1 < min2 ? min1 : min2; })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
#define min(x, y) \
__min(typeof(x), typeof(y), \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
t2 max2 = (y); \
(void) (&max1 == &max2); \
max1 > max2 ? max1 : max2; })
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
#define max(x, y) \
__max(typeof(x), typeof(y), \
__UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
x, y)
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
#define max3(x, y, z) max((typeof(x))max(x, y), z)
/**
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @lo: lowest allowable value
* @hi: highest allowable value
*
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val. See the unnecessary pointer comparisons.
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val. See the unnecessary pointer comparisons.
*/
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
*
* Or not use min/max/clamp at all, of course.
*/
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
#define min_t(type, x, y) \
__min(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
x, y)
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
#define max_t(type, x, y) \
__max(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
+ * @type to make all the comparisons.
*/
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is. This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-/*
- * swap - swap value of @a and @b
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
*/
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
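
Editor's note: two of the newly documented helpers are easy to sanity-check in user space. REPEAT_BYTE() fans a byte across an unsigned long, and reciprocal_scale() (re-implemented here to match its kernel-doc) maps a value into [0, ep_ro) with a multiply-and-shift instead of a modulo:

#include <stdio.h>

#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))

/* mirrors the kernel's reciprocal_scale() */
static unsigned int reciprocal_scale(unsigned int val, unsigned int ep_ro)
{
	return (unsigned int)(((unsigned long long)val * ep_ro) >> 32);
}

int main(void)
{
	/* 0x2a repeated across every byte: 2a2a2a2a2a2a2a2a on 64-bit */
	printf("%lx\n", REPEAT_BYTE(0x2aUL));

	/* scale a hash value into a 10-element array index: always < 10 */
	printf("%u\n", reciprocal_scale(0xdeadbeef, 10));
	return 0;
}
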
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2c2a551..528b24c 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -108,9 +108,10 @@ struct ebt_table {
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
- const struct ebt_table *table,
- const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+ const struct ebt_table *table,
+ const struct nf_hook_ops *ops,
+ struct ebt_table **res);
extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
const struct nf_hook_ops *);
extern unsigned int ebt_do_table(struct sk_buff *skb,
diff --git a/include/linux/of.h b/include/linux/of.h
index cfc34117..b240ed6 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline int of_n_addr_cells(struct device_node *np)
+{
+	return 0;
+}
+static inline int of_n_size_cells(struct device_node *np)
+{
+ return 0;
+}
+
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index d7b6dab..7d065ab 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -71,14 +71,6 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
-
- /*
- * Some variables from the most recent sd_lb_stats for this domain,
- * used by wake_affine().
- */
- unsigned long nr_running;
- unsigned long load;
- unsigned long capacity;
};
struct sched_domain {
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 905d769..5f7eeab 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -42,7 +42,7 @@ enum {
#define THREAD_ALIGN THREAD_SIZE
#endif
-#ifdef CONFIG_DEBUG_STACK_USAGE
+#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
__GFP_ZERO)
#else
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index a03acd0..695257a 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
int port; /* created/attached port */
unsigned int flags; /* SNDRV_VIRMIDI_* */
rwlock_t filelist_lock;
+ struct rw_semaphore filelist_sem;
struct list_head filelist;
};
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
index b97725a..da161b5 100644
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
XT_BPF_MODE_FD_PINNED,
XT_BPF_MODE_FD_ELF,
};
+#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
struct xt_bpf_info_v1 {
__u16 mode;
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index e833ed9..be1dde9 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -363,6 +363,7 @@ int bpf_obj_get_user(const char __user *pathname)
putname(pname);
return ret;
}
+EXPORT_SYMBOL_GPL(bpf_obj_get_user);
static void bpf_evict_inode(struct inode *inode)
{
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b914fbe..8b8d6ba 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
{
struct bpf_verifier_state *parent = state->parent;
+ if (regno == BPF_REG_FP)
+ /* We don't need to worry about FP liveness because it's read-only */
+ return;
+
while (parent) {
/* if read wasn't screened by an earlier write ... */
if (state->regs[regno].live & REG_LIVE_WRITTEN)
@@ -2345,6 +2349,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
* copy register state to dest reg
*/
regs[insn->dst_reg] = regs[insn->src_reg];
+ regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6bc21e2..9d93db8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
/*
* Do not update time when cgroup is not active
*/
- if (cgrp == event->cgrp)
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
__update_cgrp_time(event->cgrp);
}
@@ -8955,6 +8955,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
static void free_pmu_context(struct pmu *pmu)
{
+ /*
+ * Static contexts such as perf_sw_context have a global lifetime
+ * and may be shared between different PMUs. Avoid freeing them
+ * when a single PMU is going away.
+ */
+ if (pmu->task_ctx_nr > perf_invalid_context)
+ return;
+
mutex_lock(&pmus_lock);
free_percpu(pmu->pmu_cpu_context);
mutex_unlock(&pmus_lock);
diff --git a/kernel/exit.c b/kernel/exit.c
index f2cd53e..cf28528 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
if (!infop)
return err;
+ if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+ goto Efault;
+
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
@@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
if (!infop)
return err;
+ if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+ goto Efault;
+
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
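
Editor's note: unsafe_put_user() deliberately skips the per-access range check, so the added access_ok() validates the whole siginfo area once before the batch. The shape of the pattern, with user-space stand-ins for the kernel primitives:

#include <stdbool.h>
#include <stdio.h>

struct info { int si_signo; int si_errno; };

static bool range_ok(void *p, unsigned long n)	/* stands in for access_ok() */
{
	return p != NULL && n > 0;
}

static int fill_info(struct info *infop)
{
	if (!range_ok(infop, sizeof(*infop)))
		return -14;			/* -EFAULT */
	/* kernel: user_access_begin(); unsafe_put_user(...); user_access_end(); */
	infop->si_signo = 17;
	infop->si_errno = 0;
	return 0;
}

int main(void)
{
	struct info i;

	printf("%d\n", fill_info(&i));		/* 0 */
	printf("%d\n", fill_info(NULL));	/* -14, no stray store */
	return 0;
}
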
diff --git a/kernel/fork.c b/kernel/fork.c
index e702cb9..07cc743 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ /* Clear stale pointers from reused stack. */
+ memset(s->addr, 0, THREAD_SIZE);
+#endif
tsk->stack_vm_area = s;
return s->addr;
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6fc89fd..5a2ef92c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
irq_setup_affinity(desc);
break;
case IRQ_STARTUP_MANAGED:
+ irq_do_set_affinity(d, aff, false);
ret = __irq_startup(desc);
- irq_set_affinity_locked(d, aff, false);
break;
case IRQ_STARTUP_ABORT:
return 0;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 638eb9c..9eb09ae 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -18,8 +18,34 @@
static inline bool irq_needs_fixup(struct irq_data *d)
{
const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+ unsigned int cpu = smp_processor_id();
- return cpumask_test_cpu(smp_processor_id(), m);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ /*
+ * The cpumask_empty() check is a workaround for interrupt chips,
+ * which do not implement effective affinity, but the architecture has
+ * enabled the config switch. Use the general affinity mask instead.
+ */
+ if (cpumask_empty(m))
+ m = irq_data_get_affinity_mask(d);
+
+ /*
+ * Sanity check. If the mask is not empty when excluding the outgoing
+ * CPU then it must contain at least one online CPU. The outgoing CPU
+ * has been removed from the online mask already.
+ */
+ if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
+ cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
+ /*
+ * If this happens then there was a missed IRQ fixup at some
+ * point. Warn about it and enforce fixup.
+ */
+ pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
+ cpumask_pr_args(m), d->irq, cpu);
+ return true;
+ }
+#endif
+ return cpumask_test_cpu(cpu, m);
}
static bool migrate_one_irq(struct irq_desc *desc)
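
Editor's note: the new sanity check encodes a simple invariant: once the outgoing CPU is excluded from the affinity mask, whatever remains must intersect the online mask, or an earlier fixup was missed. With plain bitmask arithmetic (a toy limited to 32 CPUs):

#include <stdbool.h>
#include <stdio.h>

static bool needs_fixup(unsigned affinity, unsigned online, unsigned cpu)
{
	unsigned others = affinity & ~(1u << cpu);

	if (others && !(others & online)) {
		/* a missed fixup left only offline CPUs in the mask */
		printf("missed fixup: affinity %x vs online %x\n", affinity, online);
		return true;	/* enforce migration */
	}
	return (affinity >> cpu) & 1;
}

int main(void)
{
	/* CPUs 2 and 3 in the mask, but only CPU 0 still online */
	printf("%d\n", needs_fixup(0xc, 0x1, 3));
	return 0;
}
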
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d00132b..4bff6a1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc)
set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
+static void irq_validate_effective_affinity(struct irq_data *data)
+{
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+ if (!cpumask_empty(m))
+ return;
+ pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
+ chip->name, data->irq);
+#endif
+}
+
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
+ if (!chip || !chip->irq_set_affinity)
+ return -EINVAL;
+
ret = chip->irq_set_affinity(data, mask, force);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
cpumask_copy(desc->irq_common_data.affinity, mask);
case IRQ_SET_MASK_OK_NOCOPY:
+ irq_validate_effective_affinity(data);
irq_set_thread_affinity(desc);
ret = 0;
}
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b9628e4..bf8c8fd 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch)
}
EXPORT_SYMBOL_GPL(klp_register_patch);
+/*
+ * Remove parts of patches that touch a given kernel module. The list of
+ * patches processed might be limited. When limit is NULL, all patches
+ * will be handled.
+ */
+static void klp_cleanup_module_patches_limited(struct module *mod,
+ struct klp_patch *limit)
+{
+ struct klp_patch *patch;
+ struct klp_object *obj;
+
+ list_for_each_entry(patch, &klp_patches, list) {
+ if (patch == limit)
+ break;
+
+ klp_for_each_object(patch, obj) {
+ if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+ continue;
+
+ /*
+ * Only unpatch the module if the patch is enabled or
+ * is in transition.
+ */
+ if (patch->enabled || patch == klp_transition_patch) {
+ pr_notice("reverting patch '%s' on unloading module '%s'\n",
+ patch->mod->name, obj->mod->name);
+ klp_unpatch_object(obj);
+ }
+
+ klp_free_object_loaded(obj);
+ break;
+ }
+ }
+}
+
int klp_module_coming(struct module *mod)
{
int ret;
@@ -894,7 +929,7 @@ int klp_module_coming(struct module *mod)
pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
patch->mod->name, obj->mod->name, obj->mod->name);
mod->klp_alive = false;
- klp_free_object_loaded(obj);
+ klp_cleanup_module_patches_limited(mod, patch);
mutex_unlock(&klp_mutex);
return ret;
@@ -902,9 +937,6 @@ int klp_module_coming(struct module *mod)
void klp_module_going(struct module *mod)
{
- struct klp_patch *patch;
- struct klp_object *obj;
-
if (WARN_ON(mod->state != MODULE_STATE_GOING &&
mod->state != MODULE_STATE_COMING))
return;
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod)
*/
mod->klp_alive = false;
- list_for_each_entry(patch, &klp_patches, list) {
- klp_for_each_object(patch, obj) {
- if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
- continue;
-
- /*
- * Only unpatch the module if the patch is enabled or
- * is in transition.
- */
- if (patch->enabled || patch == klp_transition_patch) {
- pr_notice("reverting patch '%s' on unloading module '%s'\n",
- patch->mod->name, obj->mod->name);
- klp_unpatch_object(obj);
- }
-
- klp_free_object_loaded(obj);
- break;
- }
- }
+ klp_cleanup_module_patches_limited(mod, NULL);
mutex_unlock(&klp_mutex);
}
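
Editor's note: klp_module_coming() can now reuse the same walker for its error path by passing the failing patch as limit, unwinding only the patches registered before it, while klp_module_going() passes NULL to unwind everything. The bounded-rollback idiom in miniature; names are illustrative:

#include <stdio.h>

static void undo(int i) { printf("undo %d\n", i); }

static void cleanup_limited(int n, int limit /* -1 means all */)
{
	for (int i = 0; i < n; i++) {
		if (i == limit)
			break;
		undo(i);
	}
}

int main(void)
{
	cleanup_limited(4, 2);	/* error path: unwind entries before the failing one */
	cleanup_limited(4, -1);	/* going path: unwind everything */
	return 0;
}
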
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 44c8d0d..e36e652 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance, struct stack_trace *trace,
int (*save)(struct stack_trace *trace))
{
- struct lock_list *entry;
- int ret;
- struct lock_list this;
struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *entry;
+ struct lock_list this;
+ int ret;
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
this.class = hlock_class(next);
this.parent = NULL;
ret = check_noncircular(&this, hlock_class(prev), &target_entry);
- if (unlikely(!ret))
+ if (unlikely(!ret)) {
+ if (!trace->entries) {
+ /*
+ * If @save fails here, the printing might trigger
+ * a WARN but because of the !nr_entries it should
+ * not do bad things.
+ */
+ save(trace);
+ }
return print_circular_bug(&this, target_entry, next, prev, trace);
+ }
else if (unlikely(ret < 0))
return print_bfs_bug(ret);
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
return print_bfs_bug(ret);
- if (save && !save(trace))
+ if (!trace->entries && !save(trace))
return 0;
/*
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
if (!ret)
return 0;
- /*
- * Debugging printouts:
- */
- if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
- graph_unlock();
- printk("\n new dependency: ");
- print_lock_name(hlock_class(prev));
- printk(KERN_CONT " => ");
- print_lock_name(hlock_class(next));
- printk(KERN_CONT "\n");
- dump_stack();
- if (!graph_lock())
- return 0;
- }
return 2;
}
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
struct held_lock *hlock;
- struct stack_trace trace;
- int (*save)(struct stack_trace *trace) = save_trace;
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .max_entries = 0,
+ .entries = NULL,
+ .skip = 0,
+ };
/*
* Debugging checks.
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
*/
if (hlock->read != 2 && hlock->check) {
int ret = check_prev_add(curr, hlock, next,
- distance, &trace, save);
+ distance, &trace, save_trace);
if (!ret)
return 0;
/*
- * Stop saving stack_trace if save_trace() was
- * called at least once:
- */
- if (save && ret == 2)
- save = NULL;
-
- /*
* Stop after the first non-trylock entry,
* as non-trylock entries have added their
* own direct dependencies already, so this
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 70ba32e..d3f3094 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p)
return 1;
}
-struct llc_stats {
- unsigned long nr_running;
- unsigned long load;
- unsigned long capacity;
- int has_capacity;
-};
+/*
+ * The purpose of wake_affine() is to quickly determine on which CPU we can run
+ * soonest. For the purpose of speed we only consider the waking and previous
+ * CPU.
+ *
+ * wake_affine_idle() - only considers 'now', it checks if the waking CPU is (or
+ * will be) idle.
+ *
+ * wake_affine_weight() - considers the weight to reflect the average
+ * scheduling latency of the CPUs. This seems to work
+ * for the overloaded case.
+ */
-static bool get_llc_stats(struct llc_stats *stats, int cpu)
+static bool
+wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int prev_cpu, int sync)
{
- struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+ if (idle_cpu(this_cpu))
+ return true;
- if (!sds)
- return false;
+ if (sync && cpu_rq(this_cpu)->nr_running == 1)
+ return true;
- stats->nr_running = READ_ONCE(sds->nr_running);
- stats->load = READ_ONCE(sds->load);
- stats->capacity = READ_ONCE(sds->capacity);
- stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu);
-
- return true;
+ return false;
}
-/*
- * Can a task be moved from prev_cpu to this_cpu without causing a load
- * imbalance that would trigger the load balancer?
- *
- * Since we're running on 'stale' values, we might in fact create an imbalance
- * but recomputing these values is expensive, as that'd mean iteration 2 cache
- * domains worth of CPUs.
- */
static bool
-wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int prev_cpu, int sync)
+wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int prev_cpu, int sync)
{
- struct llc_stats prev_stats, this_stats;
s64 this_eff_load, prev_eff_load;
unsigned long task_load;
- if (!get_llc_stats(&prev_stats, prev_cpu) ||
- !get_llc_stats(&this_stats, this_cpu))
- return false;
+ this_eff_load = target_load(this_cpu, sd->wake_idx);
+ prev_eff_load = source_load(prev_cpu, sd->wake_idx);
- /*
- * If sync wakeup then subtract the (maximum possible)
- * effect of the currently running task from the load
- * of the current LLC.
- */
if (sync) {
unsigned long current_load = task_h_load(current);
- /* in this case load hits 0 and this LLC is considered 'idle' */
- if (current_load > this_stats.load)
+ if (current_load > this_eff_load)
return true;
- this_stats.load -= current_load;
+ this_eff_load -= current_load;
}
- /*
- * The has_capacity stuff is not SMT aware, but by trying to balance
- * the nr_running on both ends we try and fill the domain at equal
- * rates, thereby first consuming cores before siblings.
- */
-
- /* if the old cache has capacity, stay there */
- if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1)
- return false;
-
- /* if this cache has capacity, come here */
- if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
- return true;
-
- /*
- * Check to see if we can move the load without causing too much
- * imbalance.
- */
task_load = task_h_load(p);
- this_eff_load = 100;
- this_eff_load *= prev_stats.capacity;
+ this_eff_load += task_load;
+ if (sched_feat(WA_BIAS))
+ this_eff_load *= 100;
+ this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= this_stats.capacity;
-
- this_eff_load *= this_stats.load + task_load;
- prev_eff_load *= prev_stats.load - task_load;
+ prev_eff_load -= task_load;
+ if (sched_feat(WA_BIAS))
+ prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
+ prev_eff_load *= capacity_of(this_cpu);
return this_eff_load <= prev_eff_load;
}
@@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
int prev_cpu, int sync)
{
int this_cpu = smp_processor_id();
- bool affine;
+ bool affine = false;
- /*
- * Default to no affine wakeups; wake_affine() should not effect a task
- * placement the load-balancer feels inclined to undo. The conservative
- * option is therefore to not move tasks when they wake up.
- */
- affine = false;
+ if (sched_feat(WA_IDLE) && !affine)
+ affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
- /*
- * If the wakeup is across cache domains, try to evaluate if movement
- * makes sense, otherwise rely on select_idle_siblings() to do
- * placement inside the cache domain.
- */
- if (!cpus_share_cache(prev_cpu, this_cpu))
- affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync);
+ if (sched_feat(WA_WEIGHT) && !affine)
+ affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
if (affine) {
@@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
*/
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
- struct sched_domain_shared *shared = env->sd->shared;
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
@@ -7672,22 +7633,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
if (env->dst_rq->rd->overload != overload)
env->dst_rq->rd->overload = overload;
}
-
- if (!shared)
- return;
-
- /*
- * Since these are sums over groups they can contain some CPUs
- * multiple times for the NUMA domains.
- *
- * Currently only wake_affine_llc() and find_busiest_group()
- * uses these numbers, only the last is affected by this problem.
- *
- * XXX fix that.
- */
- WRITE_ONCE(shared->nr_running, sds->total_running);
- WRITE_ONCE(shared->load, sds->total_load);
- WRITE_ONCE(shared->capacity, sds->total_capacity);
}
/**
@@ -8098,6 +8043,13 @@ static int should_we_balance(struct lb_env *env)
int cpu, balance_cpu = -1;
/*
+	 * Ensure the balancing environment is consistent; inconsistencies can
+	 * happen when the softirq triggers 'during' hotplug.
+ */
+ if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
+ return 0;
+
+ /*
* In the newly idle case, we will allow all the cpu's
* to do the newly idle load balance.
*/
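
Editor's note: wake_affine_weight() boils down to one scaled comparison; with WA_BIAS the previous CPU's side is inflated by half the domain's imbalance_pct. A simplified worked example with made-up loads and equal capacities (it omits the sync-wakeup adjustment):

#include <stdio.h>

int main(void)
{
	long long this_load = 2048, prev_load = 2048, task_load = 512;
	long long cap_this = 1024, cap_prev = 1024, imbalance_pct = 117;

	long long this_eff = (this_load + task_load) * 100 * cap_prev;
	long long prev_eff = (prev_load - task_load) *
			     (100 + (imbalance_pct - 100) / 2) * cap_this;

	/* pull the task to the waking CPU only if it is not the busier side;
	 * here 2560*100 > 1536*108, so the wakeup stays put (prints 0) */
	printf("affine = %d\n", this_eff <= prev_eff);
	return 0;
}
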
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index d3fb155..319ed0e 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)
+SCHED_FEAT(WA_IDLE, true)
+SCHED_FEAT(WA_WEIGHT, true)
+SCHED_FEAT(WA_BIAS, true)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index bb3a380..0ae832e 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags,
return 0;
}
-void __get_seccomp_filter(struct seccomp_filter *filter)
+static void __get_seccomp_filter(struct seccomp_filter *filter)
{
/* Reference count is bounded by the number of total processes. */
refcount_inc(&filter->usage);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2689b7c..dfdad67 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1092,8 +1092,8 @@
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
- select LOCKDEP_CROSSRELEASE
- select LOCKDEP_COMPLETIONS
+ select LOCKDEP_CROSSRELEASE if BROKEN
+ select LOCKDEP_COMPLETIONS if BROKEN
select TRACE_IRQFLAGS
default n
help
@@ -1590,6 +1590,54 @@
source kernel/trace/Kconfig
+config PROVIDE_OHCI1394_DMA_INIT
+ bool "Remote debugging over FireWire early on boot"
+ depends on PCI && X86
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+ this feature to remotely access the memory of the crashed machine
+ over FireWire. This employs remote DMA as part of the OHCI1394
+ specification which is now the standard for FireWire controllers.
+
+ With remote DMA, you can monitor the printk buffer remotely using
+ firescope and access all memory below 4GB using fireproxy from gdb.
+ Even controlling a kernel debugger is possible using remote DMA.
+
+ Usage:
+
+ If ohci1394_dma=early is used as boot parameter, it will initialize
+ all OHCI1394 controllers which are found in the PCI config space.
+
+ As all changes to the FireWire bus such as enabling and disabling
+ devices cause a bus reset and thereby disable remote DMA for all
+ devices, be sure to have the cable plugged and FireWire enabled on
+ the debugging host before booting the debug target for debugging.
+
+ This code (~1k) is freed after boot. By then, the firewire stack
+ in charge of the OHCI-1394 controllers should be used instead.
+
+ See Documentation/debugging-via-ohci1394.txt for more information.
+
+config DMA_API_DEBUG
+ bool "Enable debugging of DMA-API usage"
+ depends on HAVE_DMA_API_DEBUG
+ help
+ Enable this option to debug the use of the DMA API by device drivers.
+ With this option you will be able to detect common bugs in device
+ drivers like double-freeing of DMA mappings or freeing mappings that
+ were never allocated.
+
+ This also attempts to catch cases where a page owned by DMA is
+ accessed by the cpu in a way that could cause data corruption. For
+ example, this enables cow_user_page() to check that the source page is
+ not undergoing DMA.
+
+ This option causes a performance degradation. Use only if you want to
+ debug device drivers and dma interactions.
+
+ If unsure, say N.
+
menu "Runtime Testing"
config LKDTM
@@ -1749,56 +1797,6 @@
If unsure, say N.
-endmenu # runtime tests
-
-config PROVIDE_OHCI1394_DMA_INIT
- bool "Remote debugging over FireWire early on boot"
- depends on PCI && X86
- help
- If you want to debug problems which hang or crash the kernel early
- on boot and the crashing machine has a FireWire port, you can use
- this feature to remotely access the memory of the crashed machine
- over FireWire. This employs remote DMA as part of the OHCI1394
- specification which is now the standard for FireWire controllers.
-
- With remote DMA, you can monitor the printk buffer remotely using
- firescope and access all memory below 4GB using fireproxy from gdb.
- Even controlling a kernel debugger is possible using remote DMA.
-
- Usage:
-
- If ohci1394_dma=early is used as boot parameter, it will initialize
- all OHCI1394 controllers which are found in the PCI config space.
-
- As all changes to the FireWire bus such as enabling and disabling
- devices cause a bus reset and thereby disable remote DMA for all
- devices, be sure to have the cable plugged and FireWire enabled on
- the debugging host before booting the debug target for debugging.
-
- This code (~1k) is freed after boot. By then, the firewire stack
- in charge of the OHCI-1394 controllers should be used instead.
-
- See Documentation/debugging-via-ohci1394.txt for more information.
-
-config DMA_API_DEBUG
- bool "Enable debugging of DMA-API usage"
- depends on HAVE_DMA_API_DEBUG
- help
- Enable this option to debug the use of the DMA API by device drivers.
- With this option you will be able to detect common bugs in device
- drivers like double-freeing of DMA mappings or freeing mappings that
- were never allocated.
-
- This also attempts to catch cases where a page owned by DMA is
- accessed by the cpu in a way that could cause data corruption. For
- example, this enables cow_user_page() to check that the source page is
- not undergoing DMA.
-
- This option causes a performance degradation. Use only if you want to
- debug device drivers and dma interactions.
-
- If unsure, say N.
-
config TEST_LKM
tristate "Test module loading with 'hello world' module"
default n
@@ -1873,18 +1871,6 @@
If unsure, say N.
-config MEMTEST
- bool "Memtest"
- depends on HAVE_MEMBLOCK
- ---help---
- This option adds a kernel parameter 'memtest', which allows memtest
- to be set.
- memtest=0, mean disabled; -- default
- memtest=1, mean do 1 test pattern;
- ...
- memtest=17, mean do 17 test patterns.
- If you are unsure how to answer this question, answer N.
-
config TEST_STATIC_KEYS
tristate "Test static keys"
default n
@@ -1894,16 +1880,6 @@
If unsure, say N.
-config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
- for validity.
-
- If unsure, say N.
-
config TEST_KMOD
tristate "kmod stress tester"
default n
@@ -1941,6 +1917,29 @@
If unsure, say N.
+endmenu # runtime tests
+
+config MEMTEST
+ bool "Memtest"
+ depends on HAVE_MEMBLOCK
+ ---help---
+ This option adds a kernel parameter 'memtest', which allows memtest
+ to be set.
+	  memtest=0, means disabled; -- default
+	  memtest=1, means do 1 test pattern;
+	  ...
+	  memtest=17, means do 17 test patterns.
+ If you are unsure how to answer this question, answer N.
+
+config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select DEBUG_LIST
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+ for validity.
+
+ If unsure, say N.
source "samples/Kconfig"
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index cd0b5c9..2b827b8 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2031,11 +2031,13 @@ void locking_selftest(void)
print_testname("mixed read-lock/lock-write ABBA");
pr_cont(" |");
dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+#ifdef CONFIG_PROVE_LOCKING
/*
* Lockdep does indeed fail here, but there's nothing we can do about
* that now. Don't kill lockdep for it.
*/
unexpected_testcase_failures--;
+#endif
pr_cont(" |");
dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
diff --git a/mm/cma.c b/mm/cma.c
index c0da318..022e52b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
trace_cma_alloc(pfn, page, count, align);
- if (ret) {
+ if (ret && !(gfp_mask & __GFP_NOWARN)) {
pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
__func__, count, ret);
cma_debug_show_areas(cma);
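The cma_alloc() hunk above keeps the failure message for ordinary callers and silences it only when the caller opted in with __GFP_NOWARN. A hedged sketch of such a caller (the order, count and fallback are made up):

        /* caller with its own fallback: ask cma_alloc() not to warn */
        struct page *pages;

        pages = cma_alloc(cma, 1 << 8, 0, GFP_KERNEL | __GFP_NOWARN);
        if (!pages)
                pages = alloc_pages(GFP_KERNEL, 8);     /* quiet fallback */
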
diff --git a/mm/madvise.c b/mm/madvise.c
index 25bade3..fd70d6a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -757,6 +757,9 @@ madvise_behavior_valid(int behavior)
* MADV_DONTFORK - omit this area from child's address space when forking:
* typically, to avoid COWing pages pinned by get_user_pages().
* MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
+ * MADV_WIPEONFORK - present the child process with zero-filled memory in this
+ * range after a fork.
+ * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
* MADV_HWPOISON - trigger memory error handler as if the given memory range
* were corrupted by unrecoverable hardware memory failure.
* MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
@@ -777,7 +780,9 @@ madvise_behavior_valid(int behavior)
* zero - success
* -EINVAL - start + len < 0, start is not page-aligned,
* "behavior" is not a valid value, or application
- * is attempting to release locked or shared pages.
+ * is attempting to release locked or shared pages,
+ * or the specified address range includes file, Huge TLB,
+ * MAP_SHARED or VM_PFNMAP range.
* -ENOMEM - addresses in the specified range are not currently
* mapped, or are outside the AS of the process.
* -EIO - an I/O error occurred while paging in data.
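
MADV_WIPEONFORK, documented above, is directly visible to userspace. A small runnable sketch (the 'secret' buffer stands in for state such as a PRNG pool; the fallback #define is only for old headers):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>

#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18      /* uapi asm-generic/mman-common.h */
#endif

int main(void)
{
        char *secret = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        strcpy(secret, "parent-only state");
        if (madvise(secret, 4096, MADV_WIPEONFORK))
                perror("madvise");      /* pre-4.14 kernels: EINVAL */

        if (fork() == 0) {
                /* child sees zero-filled memory, not the parent's data */
                printf("child:  \"%s\"\n", secret);
                _exit(0);
        }
        wait(NULL);
        printf("parent: \"%s\"\n", secret);
        return 0;
}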
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 006ba62..a2af6d5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
struct page *page;
page = __alloc_pages(gfp, order, nid);
- if (page && page_to_nid(page) == nid)
- inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
+ if (page && page_to_nid(page) == nid) {
+ preempt_disable();
+ __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
+ preempt_enable();
+ }
return page;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 6954c14..e00814c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start,
unsigned long addr;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
- migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE;
+ migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
migrate->dst[migrate->npages] = 0;
+ migrate->npages++;
migrate->cpages++;
}
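
The migrate_vma_collect_hole() fix above is a post-increment-in-subscript bug: the first store advanced npages before the second store used it as an index. Standalone illustration (array names invented):

        unsigned long src[8], dst[8], npages = 0;

        src[npages++] = 1;      /* writes src[0], then npages == 1   */
        dst[npages] = 0;        /* BUG: writes dst[1], dst[0] stale  */

        /* fixed form, as in the hunk above */
        src[npages] = 1;
        dst[npages] = 0;
        npages++;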
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 6a03946..53afbb9 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -6,17 +6,6 @@
#include "internal.h"
-static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
-{
- pmd_t pmde;
- /*
- * Make sure we don't re-load pmd between present and !trans_huge check.
- * We need a consistent view.
- */
- pmde = READ_ONCE(*pvmw->pmd);
- return pmd_present(pmde) && !pmd_trans_huge(pmde);
-}
-
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
page_vma_mapped_walk_done(pvmw);
@@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
+ pmd_t pmde;
/* The only possible pmd mapping has been handled on last iteration */
if (pvmw->pmd && !pvmw->pte)
@@ -148,7 +138,13 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (!pud_present(*pud))
return false;
pvmw->pmd = pmd_offset(pud, pvmw->address);
- if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) {
+ /*
+ * Make sure the pmd value isn't cached in a register by the
+ * compiler and used as a stale value after we've observed a
+ * subsequent update.
+ */
+ pmde = READ_ONCE(*pvmw->pmd);
+ if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
pvmw->ptl = pmd_lock(mm, pvmw->pmd);
if (likely(pmd_trans_huge(*pvmw->pmd))) {
if (pvmw->flags & PVMW_MIGRATION)
@@ -167,17 +163,15 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return not_found(pvmw);
return true;
}
- } else
- WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
+ }
return not_found(pvmw);
} else {
/* THP pmd was split under us: handle on pte level */
spin_unlock(pvmw->ptl);
pvmw->ptl = NULL;
}
- } else {
- if (!check_pmd(pvmw))
- return false;
+ } else if (!pmd_present(pmde)) {
+ return false;
}
if (!map_pte(pvmw))
goto next_pte;
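
The page_vma_mapped_walk() change takes one READ_ONCE() snapshot of the pmd and tests that snapshot everywhere. A sketch of the hazard with plain loads (handle_huge_pmd() is a hypothetical stand-in):

        pmd_t pmde;

        /* racy: two independent loads of *pmd can observe different
         * values if the entry changes between the two tests
         */
        if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
                handle_huge_pmd();

        /* snapshot once; every later test agrees on one value */
        pmde = READ_ONCE(*pmd);
        if (pmd_present(pmde) && pmd_trans_huge(pmde))
                handle_huge_pmd();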
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ed91091..05b6803 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
bool swap_vma_readahead = true;
-#define SWAP_RA_MAX_ORDER_DEFAULT 3
-
-static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;
-
#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
@@ -664,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
pte_t *tpte;
#endif
+ max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
+ SWAP_RA_ORDER_CEILING);
+ if (max_win == 1) {
+ swap_ra->win = 1;
+ return NULL;
+ }
+
faddr = vmf->address;
entry = pte_to_swp_entry(vmf->orig_pte);
if ((unlikely(non_swap_entry(entry))))
@@ -672,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
if (page)
return page;
- max_win = 1 << READ_ONCE(swap_ra_max_order);
- if (max_win == 1) {
- swap_ra->win = 1;
- return NULL;
- }
-
fpfn = PFN_DOWN(faddr);
swap_ra_info = GET_SWAP_RA_VAL(vma);
pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
@@ -786,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr =
__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
vma_ra_enabled_store);
-static ssize_t vma_ra_max_order_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", swap_ra_max_order);
-}
-static ssize_t vma_ra_max_order_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- int err, v;
-
- err = kstrtoint(buf, 10, &v);
- if (err || v > SWAP_RA_ORDER_CEILING || v <= 0)
- return -EINVAL;
-
- swap_ra_max_order = v;
-
- return count;
-}
-static struct kobj_attribute vma_ra_max_order_attr =
- __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show,
- vma_ra_max_order_store);
-
static struct attribute *swap_attrs[] = {
&vma_ra_enabled_attr.attr,
- &vma_ra_max_order_attr.attr,
NULL,
};
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8a43db6..6739420 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
- if (fatal_signal_pending(current)) {
- area->nr_pages = i;
- goto fail_no_warn;
- }
-
if (node == NUMA_NO_NODE)
page = alloc_page(alloc_mask|highmem_mask);
else
@@ -1723,7 +1718,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure, allocated %ld of %ld bytes",
(area->nr_pages*PAGE_SIZE), area->size);
-fail_no_warn:
vfree(area->addr);
return NULL;
}
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 2585b10..276b602 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
static int __net_init broute_net_init(struct net *net)
{
- net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
- return PTR_ERR_OR_ZERO(net->xt.broute_table);
+ return ebt_register_table(net, &broute_table, NULL,
+ &net->xt.broute_table);
}
static void __net_exit broute_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 45a00db..c41da5f 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
static int __net_init frame_filter_net_init(struct net *net)
{
- net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
- return PTR_ERR_OR_ZERO(net->xt.frame_filter);
+ return ebt_register_table(net, &frame_filter, ebt_ops_filter,
+ &net->xt.frame_filter);
}
static void __net_exit frame_filter_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 57cd5bb..08df740 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
static int __net_init frame_nat_net_init(struct net *net)
{
- net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
- return PTR_ERR_OR_ZERO(net->xt.frame_nat);
+ return ebt_register_table(net, &frame_nat, ebt_ops_nat,
+ &net->xt.frame_nat);
}
static void __net_exit frame_nat_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 83951f9..3b3dcf7 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
kfree(table);
}
-struct ebt_table *
-ebt_register_table(struct net *net, const struct ebt_table *input_table,
- const struct nf_hook_ops *ops)
+int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+ const struct nf_hook_ops *ops, struct ebt_table **res)
{
struct ebt_table_info *newinfo;
struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
repl->entries == NULL || repl->entries_size == 0 ||
repl->counters != NULL || input_table->private != NULL) {
BUGPRINT("Bad table data for ebt_register_table!!!\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
mutex_unlock(&ebt_mutex);
+ WRITE_ONCE(*res, table);
+
if (!ops)
- return table;
+ return 0;
ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
if (ret) {
__ebt_unregister_table(net, table);
- return ERR_PTR(ret);
+ *res = NULL;
}
- return table;
+ return ret;
free_unlock:
mutex_unlock(&ebt_mutex);
free_chainstack:
@@ -1276,7 +1277,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
free_table:
kfree(table);
out:
- return ERR_PTR(ret);
+ return ret;
}
void ebt_unregister_table(struct net *net, struct ebt_table *table,
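
The ebt_register_table() conversion above replaces "return table or ERR_PTR" with "return int, publish through an out-parameter": the old callers stored the ERR_PTR into net->xt.* before checking it, briefly exposing an error pointer to other readers. The two calling conventions side by side (taken from the hunks above):

        /* old: an ERR_PTR sits in net->xt.broute_table until checked */
        net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
        return PTR_ERR_OR_ZERO(net->xt.broute_table);

        /* new: *res is written (via WRITE_ONCE) only with a valid
         * table; errors travel in the plain int return value
         */
        return ebt_register_table(net, &broute_table, NULL,
                                  &net->xt.broute_table);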
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 416bb30..1859c47 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
greh = (struct gre_base_hdr *)skb_transport_header(skb);
pcsum = (__sum16 *)(greh + 1);
- if (gso_partial) {
+ if (gso_partial && skb_is_gso(skb)) {
unsigned int partial_adj;
/* Adjust checksum to account for the fact that
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 811689e..f75fc6b 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
if (synproxy == NULL)
return NF_ACCEPT;
- if (nf_is_loopback_packet(skb))
+ if (nf_is_loopback_packet(skb) ||
+ ip_hdr(skb)->protocol != IPPROTO_TCP)
return NF_ACCEPT;
thoff = ip_hdrlen(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ac6fde5..3d9f1c2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2513,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
struct rtable *ort = (struct rtable *) dst_orig;
struct rtable *rt;
- rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
+ rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5676237..e45177c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2240,20 +2240,16 @@ int udp_v4_early_demux(struct sk_buff *skb)
iph = ip_hdr(skb);
uh = udp_hdr(skb);
- if (skb->pkt_type == PACKET_BROADCAST ||
- skb->pkt_type == PACKET_MULTICAST) {
+ if (skb->pkt_type == PACKET_MULTICAST) {
in_dev = __in_dev_get_rcu(skb->dev);
if (!in_dev)
return 0;
- /* we are supposed to accept bcast packets */
- if (skb->pkt_type == PACKET_MULTICAST) {
- ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
- iph->protocol);
- if (!ours)
- return 0;
- }
+ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+ iph->protocol);
+ if (!ours)
+ return 0;
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr,
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 97658bf..e360d55 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
* will be using a length value equal to only one MSS sized
* segment instead of the entire frame.
*/
- if (gso_partial) {
+ if (gso_partial && skb_is_gso(skb)) {
uh->len = htons(skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)uh);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 96861c7..4a96ebb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3820,8 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
goto out;
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
- dev_net(dev)->ipv6.devconf_all->accept_dad < 1 ||
- idev->cnf.accept_dad < 1 ||
+ (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
+ idev->cnf.accept_dad < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
bump_id = ifp->flags & IFA_F_TENTATIVE;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cdb3728..4a87f94 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
- if (gso_partial)
+ if (gso_partial && skb_is_gso(skb))
payload_len = skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)(ipv6h + 1);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index a5cd43d..437af8c 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
nexthdr = ipv6_hdr(skb)->nexthdr;
thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
- if (thoff < 0)
+ if (thoff < 0 || nexthdr != IPPROTO_TCP)
return NF_ACCEPT;
th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 26cc9f4..a96d5b3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
struct dst_entry *new = NULL;
rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
- DST_OBSOLETE_NONE, 0);
+ DST_OBSOLETE_DEAD, 0);
if (rt) {
rt6_info_init(rt);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e495b5e..cf84f7b 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
from->family == to->family))
return -IPSET_ERR_TYPE_MISMATCH;
- if (from->ref_netlink || to->ref_netlink)
+ write_lock_bh(&ip_set_ref_lock);
+
+ if (from->ref_netlink || to->ref_netlink) {
+ write_unlock_bh(&ip_set_ref_lock);
return -EBUSY;
+ }
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
strncpy(from->name, to->name, IPSET_MAXNAMELEN);
strncpy(to->name, from_name, IPSET_MAXNAMELEN);
- write_lock_bh(&ip_set_ref_lock);
swap(from->ref, to->ref);
ip_set(inst, from_id) = to;
ip_set(inst, to_id) = from;
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
static int __init
ip_set_init(void)
{
- int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+ int ret = register_pernet_subsys(&ip_set_net_ops);
- if (ret != 0) {
- pr_err("ip_set: cannot register with nfnetlink.\n");
+ if (ret) {
+ pr_err("ip_set: cannot register pernet_subsys.\n");
return ret;
}
+
+ ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+ if (ret != 0) {
+ pr_err("ip_set: cannot register with nfnetlink.\n");
+ unregister_pernet_subsys(&ip_set_net_ops);
+ return ret;
+ }
+
ret = nf_register_sockopt(&so_set);
if (ret != 0) {
pr_err("SO_SET registry failed: %d\n", ret);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ unregister_pernet_subsys(&ip_set_net_ops);
return ret;
}
- ret = register_pernet_subsys(&ip_set_net_ops);
- if (ret) {
- pr_err("ip_set: cannot register pernet_subsys.\n");
- nf_unregister_sockopt(&so_set);
- nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
- return ret;
- }
+
pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
return 0;
}
@@ -2098,9 +2104,10 @@ ip_set_init(void)
static void __exit
ip_set_fini(void)
{
- unregister_pernet_subsys(&ip_set_net_ops);
nf_unregister_sockopt(&so_set);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+
+ unregister_pernet_subsys(&ip_set_net_ops);
pr_debug("these are the famous last words\n");
}
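
The reordered ip_set_init() above follows the standard register-in-order, unwind-in-reverse idiom. A minimal standalone sketch of the shape (register_a/b/c are stand-ins for the pernet subsys, nfnetlink and sockopt registrations):

static int demo_init(void)
{
        int ret;

        ret = register_a();
        if (ret)
                return ret;
        ret = register_b();
        if (ret)
                goto err_a;
        ret = register_c();
        if (ret)
                goto err_b;
        return 0;

err_b:
        unregister_b();
err_a:
        unregister_a();
        return ret;
}

static void demo_exit(void)
{
        /* exact reverse of the init order */
        unregister_c();
        unregister_b();
        unregister_a();
}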
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 20bfbd3..613eb21 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
ip &= ip_set_hostmask(h->netmask);
+ e.ip = htonl(ip);
+ if (e.ip == 0)
+ return -IPSET_ERR_HASH_ELEM;
- if (adt == IPSET_TEST) {
- e.ip = htonl(ip);
- if (e.ip == 0)
- return -IPSET_ERR_HASH_ELEM;
+ if (adt == IPSET_TEST)
return adtfn(set, &e, &ext, &ext, flags);
- }
ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
- if (retried)
+ if (retried) {
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip += hosts) {
e.ip = htonl(ip);
- if (e.ip == 0)
- return -IPSET_ERR_HASH_ELEM;
+ }
+ for (; ip <= ip_to;) {
ret = adtfn(set, &e, &ext, &ext, flags);
-
if (ret && !ip_set_eexist(ret, flags))
return ret;
+ ip += hosts;
+ e.ip = htonl(ip);
+ if (e.ip == 0)
+ return 0;
+
ret = 0;
}
return ret;
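
The hash:ip loop rework above guards against unsigned wrap-around: with a 32-bit ip and an inclusive upper bound of 255.255.255.255, the condition ip <= ip_to can never become false on its own, because the increment wraps to a small value (the hunk detects this by checking the incremented address for 0). Standalone illustration:

        unsigned int ip = 0xfffffff0, ip_to = 0xffffffff;

        for (; ip <= ip_to; ip++) {     /* never false by itself  */
                /* add_entry(ip); */
                if (ip == ip_to)        /* explicit wrap guard    */
                        break;
        }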
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index b64cf14..f3ba834 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
e.ip = htonl(ip);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index f438740..ddb8039 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 6215fb8..a7f4d7a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
for (; p <= port_to; p++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5ab1b99..a2f19b9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- for (; !before(ip_to, ip); ip++) {
+ for (; ip <= ip_to; ip++) {
e.ip = htonl(ip);
p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
: port;
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
ip == ntohl(h->next.ip) &&
p == ntohs(h->next.port)
? ntohl(h->next.ip2) : ip2_from;
- while (!after(ip2, ip2_to)) {
+ while (ip2 <= ip2_to) {
e.ip2 = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&cidr);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 5d9e8954..1c67a17 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
}
if (retried)
ip = ntohl(h->next.ip);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 44cf119..d417074 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index db614e1..7f9ae2e 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip[0]);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip[0] = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
ip2 = (retried &&
ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
: ip2_from;
- while (!after(ip2, ip2_to)) {
+ while (ip2 <= ip2_to) {
e.ip[1] = htonl(ip2);
last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 54b64b6..e6ef382 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip = htonl(ip);
last = ip_set_range_to_cidr(ip, ip_to, &cidr);
e.cidr = cidr - 1;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index aff8469..8602f25 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (retried)
ip = ntohl(h->next.ip[0]);
- while (!after(ip, ip_to)) {
+ while (ip <= ip_to) {
e.ip[0] = htonl(ip);
ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
: ip2_from;
- while (!after(ip2, ip2_to)) {
+ while (ip2 <= ip2_to) {
e.ip[1] = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
&e.cidr[1]);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 90d3968..4527921 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
{
struct sk_buff *new_skb = NULL;
struct iphdr *old_iph = NULL;
+ __u8 old_dsfield;
#ifdef CONFIG_IP_VS_IPV6
struct ipv6hdr *old_ipv6h = NULL;
#endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
*payload_len =
ntohs(old_ipv6h->payload_len) +
sizeof(*old_ipv6h);
- *dsfield = ipv6_get_dsfield(old_ipv6h);
+ old_dsfield = ipv6_get_dsfield(old_ipv6h);
*ttl = old_ipv6h->hop_limit;
if (df)
*df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
/* fix old IP header checksum */
ip_send_check(old_iph);
- *dsfield = ipv4_get_dsfield(old_iph);
+ old_dsfield = ipv4_get_dsfield(old_iph);
*ttl = old_iph->ttl;
if (payload_len)
*payload_len = ntohs(old_iph->tot_len);
}
+ /* Implement full-functionality option for ECN encapsulation */
+ *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
+
return skb;
error:
kfree_skb(skb);
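
The ipvs change above derives the outer DS field with INET_ECN_encapsulate() instead of copying the inner one verbatim; per RFC 6040 the DSCP bits are kept while an inner CE mark is remapped to ECT(0) on the outer header. A hedged standalone approximation of that mapping (masks as in include/net/inet_ecn.h):

#define ECN_MASK  0x03          /* low two bits of the DS field */
#define ECN_CE    0x03
#define ECN_ECT_0 0x02

static unsigned char ecn_encapsulate(unsigned char outer, unsigned char inner)
{
        outer &= ~ECN_MASK;                       /* keep outer DSCP */
        outer |= (inner & ECN_MASK) == ECN_CE     /* CE -> ECT(0)    */
                 ? ECN_ECT_0 : (inner & ECN_MASK);
        return outer;
}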
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9299271..64e1ee0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
goto nla_put_failure;
- if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+ if (basechain->stats && nft_dump_stats(skb, basechain->stats))
goto nla_put_failure;
}
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
genmask);
- if (IS_ERR(chain2))
- return PTR_ERR(chain2);
+ if (!IS_ERR(chain2))
+ return -EEXIST;
}
if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2741,8 +2741,10 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
list_for_each_entry(i, &ctx->table->sets, list) {
if (!nft_is_active_next(ctx->net, i))
continue;
- if (!strcmp(set->name, i->name))
+ if (!strcmp(set->name, i->name)) {
+ kfree(set->name);
return -ENFILE;
+ }
}
return 0;
}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c83a3b5..d8571f4 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
return ERR_PTR(-EFAULT);
- strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+ memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
info->num_counters = compat_tmp.num_counters;
user += sizeof(compat_tmp);
} else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(info, user, sizeof(*info)) != 0)
return ERR_PTR(-EFAULT);
- info->name[sizeof(info->name) - 1] = '\0';
user += sizeof(*info);
}
+ info->name[sizeof(info->name) - 1] = '\0';
size = sizeof(struct xt_counters);
size *= info->num_counters;
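
The x_tables hunk swaps strlcpy() for a bounded memcpy() plus a single shared NUL-termination: the kernel's strlcpy() starts with strlen() on the source, so a user-supplied name field that is not NUL-terminated can be read past its end. A hedged standalone sketch (the 32-byte size is illustrative):

        char name[32];
        char user_name[32];     /* from copy_from_user(), maybe no NUL */

        /* risky: strlcpy(name, user_name, sizeof(name)) measures
         * user_name with strlen() first and may read beyond it
         */

        /* safe: bounded copy, terminate once afterwards */
        memcpy(name, user_name, sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';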
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 38986a9..2912393 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/syscalls.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
return 0;
}
+static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
+{
+ mm_segment_t oldfs = get_fs();
+ int retval, fd;
+
+ set_fs(KERNEL_DS);
+ fd = bpf_obj_get_user(path);
+ set_fs(oldfs);
+ if (fd < 0)
+ return fd;
+
+ retval = __bpf_mt_check_fd(fd, ret);
+ sys_close(fd);
+ return retval;
+}
+
static int bpf_mt_check(const struct xt_mtchk_param *par)
{
struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
return __bpf_mt_check_bytecode(info->bpf_program,
info->bpf_program_num_elem,
&info->filter);
- else if (info->mode == XT_BPF_MODE_FD_PINNED ||
- info->mode == XT_BPF_MODE_FD_ELF)
+ else if (info->mode == XT_BPF_MODE_FD_ELF)
return __bpf_mt_check_fd(info->fd, &info->filter);
+ else if (info->mode == XT_BPF_MODE_PATH_PINNED)
+ return __bpf_mt_check_path(info->path, &info->filter);
else
return -EINVAL;
}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index e75ef39..575d215 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
transparent = nf_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
transparent = nf_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 94c11cf..f347506 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2266,16 +2266,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
+ if (cb->start) {
+ ret = cb->start(cb);
+ if (ret)
+ goto error_unlock;
+ }
+
nlk->cb_running = true;
mutex_unlock(nlk->cb_mutex);
- ret = 0;
- if (cb->start)
- ret = cb->start(cb);
-
- if (!ret)
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk);
sock_put(sk);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9b5de31..c1841f2 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
struct sock_xprt *transport =
container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
- struct socket *sock = transport->sock;
+ struct socket *sock;
int status = -EIO;
sock = xs_create_sock(xprt, transport,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7d99029..a140dd4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct sk_buff_head xmitq;
int rc = 0;
- __skb_queue_head_init(&xmitq);
+ skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
if (tipc_link_bc_peers(l))
rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
u32 dst, selector;
selector = msg_link_selector(buf_msg(skb_peek(pkts)));
- __skb_queue_head_init(&_pkts);
+ skb_queue_head_init(&_pkts);
list_for_each_entry_safe(n, tmp, &dests->list, list) {
dst = n->value;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 121e59a..17146c1 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
msg_set_destnode(msg, dnode);
msg_set_destport(msg, dport);
*err = TIPC_OK;
+
+ if (!skb_cloned(skb))
+ return true;
+
+ /* Unclone buffer in case it was bundled */
+ if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+ return false;
+
return true;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 6908742..d396cb6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
[NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
};
+/* policy for packet pattern attributes */
+static const struct nla_policy
+nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
+ [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
+ [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
+ [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
u8 *mask_pat;
nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
- NULL, info->extack);
+ nl80211_packet_pattern_policy,
+ info->extack);
err = -EINVAL;
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
@@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
rem) {
u8 *mask_pat;
- nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
+ nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+ nl80211_packet_pattern_policy, NULL);
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
return -EINVAL;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index acf0010..30e5746 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
}
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+ xso->dev = NULL;
dev_put(dev);
return 0;
}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 2515cd2..8ac9d32 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -429,7 +429,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
nf_reset(skb);
if (decaps) {
- skb->sp->olen = 0;
+ if (skb->sp)
+ skb->sp->olen = 0;
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return 0;
@@ -440,7 +441,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
if (xfrm_gro) {
- skb->sp->olen = 0;
+ if (skb->sp)
+ skb->sp->olen = 0;
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return err;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0dab1cd..1221347 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -732,12 +732,12 @@ int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
}
}
}
+out:
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
if (cnt) {
err = 0;
xfrm_policy_cache_flush();
}
-out:
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2bfbd91..b997f13 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0) {
x->km.state = XFRM_STATE_DEAD;
+ xfrm_dev_state_delete(x);
__xfrm_state_put(x);
goto out;
}
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 29df825..2f6ce80 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -103,11 +103,12 @@
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates.
+ file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
while read symbol; do
local fields=($symbol)
local sym_base=0x${fields[0]}
local sym_type=${fields[1]}
- local sym_end=0x${fields[3]}
+ local sym_end=${fields[3]}
# calculate the size
local sym_size=$(($sym_end - $sym_base))
@@ -157,7 +158,7 @@
addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
DONE=1
- done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }')
+ done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
}
[[ $# -lt 2 ]] && usage
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 5d55441..9ee9bf7 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s)
else if (str[0] == '$')
return -1;
/* exclude debugging symbols */
- else if (stype == 'N')
+ else if (stype == 'N' || stype == 'n')
return -1;
/* include the type field in the symbol name, so that it gets
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index ea2d0ae..6c9cba2 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
struct snd_seq_port_info *info = arg;
struct snd_seq_client_port *port;
struct snd_seq_port_callback *callback;
+ int port_idx;
/* it is not allowed to create the port for an another client */
if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
return -ENOMEM;
if (client->type == USER_CLIENT && info->kernel) {
- snd_seq_delete_port(client, port->addr.port);
+ port_idx = port->addr.port;
+ snd_seq_port_unlock(port);
+ snd_seq_delete_port(client, port_idx);
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
snd_seq_set_port_info(port, info);
snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
+ snd_seq_port_unlock(port);
return 0;
}
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 0a7020c..d21ece9 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
}
-/* create a port, port number is returned (-1 on failure) */
+/* create a port, port number is returned (-1 on failure);
+ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
+ */
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
+ snd_use_lock_use(&new_port->use_lock);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
+ sprintf(new_port->name, "port-%d", num);
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
- sprintf(new_port->name, "port-%d", num);
return new_port;
}
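
After the seq_ports change above, snd_seq_create_port() hands back the port with its use_lock reference already taken, so a concurrent deletion cannot free it while the creator is still initializing it. A sketch of the resulting caller contract (mirroring the seq_clientmgr hunk):

        struct snd_seq_client_port *port;

        port = snd_seq_create_port(client, -1);
        if (!port)
                return -ENOMEM;

        /* ... set up callbacks/info; the port cannot vanish here ... */

        snd_seq_port_unlock(port);      /* drop the creation reference */
        return 0;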
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 8d93a40..f48a4cd 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
* decode input event and put to read buffer of each opened file
*/
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
- struct snd_seq_event *ev)
+ struct snd_seq_event *ev,
+ bool atomic)
{
struct snd_virmidi *vmidi;
unsigned char msg[4];
int len;
- read_lock(&rdev->filelist_lock);
+ if (atomic)
+ read_lock(&rdev->filelist_lock);
+ else
+ down_read(&rdev->filelist_sem);
list_for_each_entry(vmidi, &rdev->filelist, list) {
if (!vmidi->trigger)
continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
snd_rawmidi_receive(vmidi->substream, msg, len);
}
}
- read_unlock(&rdev->filelist_lock);
+ if (atomic)
+ read_unlock(&rdev->filelist_lock);
+ else
+ up_read(&rdev->filelist_sem);
return 0;
}
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
struct snd_virmidi_dev *rdev;
rdev = rmidi->private_data;
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, true);
}
#endif /* 0 */
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
rdev = private_data;
if (!(rdev->flags & SNDRV_VIRMIDI_USE))
return 0; /* ignored */
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, atomic);
}
/*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_rawmidi_runtime *runtime = substream->runtime;
struct snd_virmidi *vmidi;
- unsigned long flags;
vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
vmidi->client = rdev->client;
vmidi->port = rdev->port;
runtime->private_data = vmidi;
- write_lock_irqsave(&rdev->filelist_lock, flags);
+ down_write(&rdev->filelist_sem);
+ write_lock_irq(&rdev->filelist_lock);
list_add_tail(&vmidi->list, &rdev->filelist);
- write_unlock_irqrestore(&rdev->filelist_lock, flags);
+ write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
vmidi->rdev = rdev;
return 0;
}
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_virmidi *vmidi = substream->runtime->private_data;
+ down_write(&rdev->filelist_sem);
write_lock_irq(&rdev->filelist_lock);
list_del(&vmidi->list);
write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
rdev->rmidi = rmidi;
rdev->device = device;
rdev->client = -1;
+ init_rwsem(&rdev->filelist_sem);
rwlock_init(&rdev->filelist_lock);
INIT_LIST_HEAD(&rdev->filelist);
rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
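
The virmidi hunks keep two guards over one file list: the existing rwlock for atomic callers and a new rwsem for process-context callers, where event delivery may sleep. A sketch of the pick-the-lock-by-context shape (struct demo_dev and its fields are invented):

static void demo_walk(struct demo_dev *dev, bool atomic)
{
        if (atomic)
                read_lock(&dev->lock);          /* IRQ/softirq callers  */
        else
                down_read(&dev->sem);           /* may sleep while held */

        /* ... iterate the list, deliver events ... */

        if (atomic)
                read_unlock(&dev->lock);
        else
                up_read(&dev->sem);
}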
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 0fb6b1b7..d8409d9 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
if (err)
- return err;
+ goto err_kill_urb;
- if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
- return -ENODEV;
+ if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
+ err = -ENODEV;
+ goto err_kill_urb;
+ }
usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
setup_card(cdev);
return 0;
+
+ err_kill_urb:
+ usb_kill_urb(&cdev->ep1_in_urb);
+ return err;
}
static int snd_probe(struct usb_interface *intf,
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 0ff5a7d..c8f723c 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
return 0;
error:
- if (line6->disconnect)
- line6->disconnect(line6);
- snd_card_free(card);
+ /* we can call disconnect callback here because no close-sync is
+ * needed yet at this point
+ */
+ line6_disconnect(interface);
return ret;
}
EXPORT_SYMBOL_GPL(line6_probe);
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 956f847..451007c 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
intf = usb_ifnum_to_if(line6->usbdev,
pod->line6.properties->ctrl_if);
- usb_driver_release_interface(&podhd_driver, intf);
+ if (intf)
+ usb_driver_release_interface(&podhd_driver, intf);
}
}
@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
line6->disconnect = podhd_disconnect;
+ init_timer(&pod->startup_timer);
+ INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+
if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
/* claim the data interface */
intf = usb_ifnum_to_if(line6->usbdev,
@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
}
/* init device and delay registering */
- init_timer(&pod->startup_timer);
- INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
podhd_startup(pod);
return 0;
}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9732edf..91bc8f1 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
+ /* kill pending URBs */
+ snd_usb_mixer_disconnect(mixer);
+
kfree(mixer->id_elems);
if (mixer->urb) {
kfree(mixer->urb->transfer_buffer);
@@ -2584,8 +2587,13 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
- usb_kill_urb(mixer->urb);
- usb_kill_urb(mixer->rc_urb);
+ if (mixer->disconnected)
+ return;
+ if (mixer->urb)
+ usb_kill_urb(mixer->urb);
+ if (mixer->rc_urb)
+ usb_kill_urb(mixer->rc_urb);
+ mixer->disconnected = true;
}
#ifdef CONFIG_PM
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 2b4b067..545d99b 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
struct urb *rc_urb;
struct usb_ctrlrequest *rc_setup_packet;
u8 rc_buffer[6];
+
+ bool disconnected;
};
#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index b8cb57a..9ddaae3 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1138,6 +1138,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
+ case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 43ab5c4..f90860d 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -312,7 +312,7 @@ union bpf_attr {
* jump into another BPF program
* @ctx: context pointer passed to next program
* @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
- * @index: index inside array that selects specific program to run
+ * @index: 32-bit index inside array that selects specific program to run
* Return: 0 on success or negative error
*
* int bpf_clone_redirect(skb, ifindex, flags)
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 3d4c3b5..0c977b6 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample,
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
}
- printf("0x%"PRIx64, from);
+ printf(" 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
@@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample,
if (alt.map && !alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to);
- printf("0x%"PRIx64, from);
+ printf(" 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index be09d77..a971caf 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -685,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
{
struct symbol *sym = node->sym;
u64 left, right;
+ struct dso *left_dso = NULL;
+ struct dso *right_dso = NULL;
if (callchain_param.key == CCKEY_SRCLINE) {
enum match_result match = match_chain_srcline(node, cnode);
@@ -696,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
left = cnode->ms.sym->start;
right = sym->start;
+ left_dso = cnode->ms.map->dso;
+ right_dso = node->map->dso;
} else {
left = cnode->ip;
right = node->ip;
}
- if (left == right) {
+ if (left == right && left_dso == right_dso) {
if (node->branch) {
cnode->branch_count++;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f6257fb..39b1596 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
- char *name, struct cpu_map *cpus,
+ char *name, struct perf_pmu *pmu,
struct list_head *config_terms, bool auto_merge_stats)
{
struct perf_evsel *evsel;
+ struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
event_attr_init(attr);
@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
- evsel->system_wide = !!cpus;
+ evsel->system_wide = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
if (name)
@@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats);
+ evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
return evsel ? 0 : -ENOMEM;
}
@@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
evsel = __add_event(list, &parse_state->idx, &attr,
- get_config_name(head_config), pmu->cpus,
+ get_config_name(head_config), pmu,
&config_terms, auto_merge_stats);
if (evsel) {
evsel->unit = info.unit;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index ac16a9d..1c4d7b4 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -470,31 +470,10 @@ static void pmu_read_sysfs(void)
closedir(dir);
}
-static struct cpu_map *pmu_cpumask(const char *name)
+static struct cpu_map *__pmu_cpumask(const char *path)
{
- struct stat st;
- char path[PATH_MAX];
FILE *file;
struct cpu_map *cpus;
- const char *sysfs = sysfs__mountpoint();
- const char *templates[] = {
- "%s/bus/event_source/devices/%s/cpumask",
- "%s/bus/event_source/devices/%s/cpus",
- NULL
- };
- const char **template;
-
- if (!sysfs)
- return NULL;
-
- for (template = templates; *template; template++) {
- snprintf(path, PATH_MAX, *template, sysfs, name);
- if (stat(path, &st) == 0)
- break;
- }
-
- if (!*template)
- return NULL;
file = fopen(path, "r");
if (!file)
@@ -506,6 +485,51 @@ static struct cpu_map *pmu_cpumask(const char *name)
}
/*
+ * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
+ * may have a "cpus" file.
+ */
+#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
+#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
+
+static struct cpu_map *pmu_cpumask(const char *name)
+{
+ char path[PATH_MAX];
+ struct cpu_map *cpus;
+ const char *sysfs = sysfs__mountpoint();
+ const char *templates[] = {
+ CPUS_TEMPLATE_UNCORE,
+ CPUS_TEMPLATE_CPU,
+ NULL
+ };
+ const char **template;
+
+ if (!sysfs)
+ return NULL;
+
+ for (template = templates; *template; template++) {
+ snprintf(path, PATH_MAX, *template, sysfs, name);
+ cpus = __pmu_cpumask(path);
+ if (cpus)
+ return cpus;
+ }
+
+ return NULL;
+}
+
+static bool pmu_is_uncore(const char *name)
+{
+ char path[PATH_MAX];
+ struct cpu_map *cpus;
+ const char *sysfs = sysfs__mountpoint();
+
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
+ cpus = __pmu_cpumask(path);
+ cpu_map__put(cpus);
+
+ return !!cpus;
+}
+
+/*
* Return the CPU id as a raw string.
*
* Each architecture should provide a more precise id string that
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->cpus = pmu_cpumask(name);
+ pmu->is_uncore = pmu_is_uncore(name);
+
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 389e972..fe0de05 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -22,6 +22,7 @@ struct perf_pmu {
char *name;
__u32 type;
bool selectable;
+ bool is_uncore;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 0f5e347..152823b 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -5,8 +5,8 @@
include ../lib.mk
override define RUN_TESTS
- $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
- $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
+ @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
+ @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
endef
override define EMIT_TESTS
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index 00f2866..dd4162f 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -341,7 +341,7 @@ int main(int argc, char **argv)
return 0;
case 'n':
t = atoi(optarg);
- if (t > ARRAY_SIZE(test_cases))
+ if (t >= ARRAY_SIZE(test_cases))
error(1, 0, "Invalid test case: %d", t);
all_tests = false;
test_cases[t].enabled = true;
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index a2c53a3..de2f9ec 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
}
}
-static int copy_page(int ufd, unsigned long offset)
+static int __copy_page(int ufd, unsigned long offset, bool retry)
{
struct uffdio_copy uffdio_copy;
@@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset)
fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
uffdio_copy.copy), exit(1);
} else {
- if (test_uffdio_copy_eexist) {
+ if (test_uffdio_copy_eexist && retry) {
test_uffdio_copy_eexist = false;
retry_copy_page(ufd, &uffdio_copy, offset);
}
@@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset)
return 0;
}
+static int copy_page_retry(int ufd, unsigned long offset)
+{
+ return __copy_page(ufd, offset, true);
+}
+
+static int copy_page(int ufd, unsigned long offset)
+{
+ return __copy_page(ufd, offset, false);
+}
+
static void *uffd_poll_thread(void *arg)
{
unsigned long cpu = (unsigned long) arg;
@@ -544,7 +554,7 @@ static void *background_thread(void *arg)
for (page_nr = cpu * nr_pages_per_cpu;
page_nr < (cpu+1) * nr_pages_per_cpu;
page_nr++)
- copy_page(uffd, page_nr * page_size);
+ copy_page_retry(uffd, page_nr * page_size);
return NULL;
}
@@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd,
}
}
-static int uffdio_zeropage(int ufd, unsigned long offset)
+static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
{
struct uffdio_zeropage uffdio_zeropage;
int ret;
@@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
uffdio_zeropage.zeropage), exit(1);
} else {
- if (test_uffdio_zeropage_eexist) {
+ if (test_uffdio_zeropage_eexist && retry) {
test_uffdio_zeropage_eexist = false;
retry_uffdio_zeropage(ufd, &uffdio_zeropage,
offset);
@@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
return 0;
}
+static int uffdio_zeropage(int ufd, unsigned long offset)
+{
+ return __uffdio_zeropage(ufd, offset, false);
+}
+
/* exercise UFFDIO_ZEROPAGE */
static int userfaultfd_zeropage_test(void)
{
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 97f187e..0a74a20 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -20,7 +20,7 @@
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
+CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
UNAME_M := $(shell uname -m)
CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)