[ANNOUNCE] v4.18.16-rt9
Dear RT folks!
I'm pleased to announce the v4.18.16-rt9 patch set.
Changes since v4.18.16-rt8:
- The RCU fix, which was introduced in v4.18.7-rt5, leads to a lockdep
warning during CPU hotplug. After a discussion with upstream, it was
suggested to revert the change that led to the problem in -RT; the short
excerpt below sketches the effect of the revert.
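The revert is carried by EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
in the delta appended below. Its relevant hunk in kernel/rcu/tree_exp.h drops the
online-CPU lookup and queues each rcu_node structure's expedited grace-period work
directly on that node's first CPU, roughly:

    INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
    /* queue unconditionally on the node's first CPU (rnp->grplo) */
    queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);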
Known issues
- A warning triggered in "rcu_note_context_switch" originated from
SyS_timer_gettime(). The issue was always there; it is now
visible. Reported by Grygorii Strashko and Daniel Wagner.
The delta patch against v4.18.16-rt8 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.18/incr/patch-4.18.16-rt8-rt9.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.18.16-rt9
The RT patch against v4.18.16 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patch-4.18.16-rt9.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18.16-rt9.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
new file mode 100644
index 0000000..42dec86
--- /dev/null
+++ b/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
@@ -0,0 +1,44 @@
+From 53d4eed5325b9a26985c3d4f017d94919eb4ac89 Mon Sep 17 00:00:00 2001
+From: Paul E. McKenney <paulmck@linux.ibm.com>
+Date: Mon, 29 Oct 2018 11:53:01 +0100
+Subject: [PATCH] EXP rcu: Revert expedited GP parallelization cleverness
+
+(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu)
+
+This commit reverts a series of commits starting with fcc635436501 ("rcu:
+Make expedited GPs handle CPU 0 being offline") and its successors, thus
+queueing each rcu_node structure's expedited grace-period initialization
+work on the first CPU of that rcu_node structure.
+
+Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/tree_exp.h | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpu
+ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+ smp_call_func_t func)
+ {
+- int cpu;
+ struct rcu_node *rnp;
+
+ trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
+@@ -493,13 +492,7 @@ static void sync_rcu_exp_select_cpus(str
+ continue;
+ }
+ INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
+- preempt_disable();
+- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+- /* If all offline, queue the work on an unbound CPU. */
+- if (unlikely(cpu > rnp->grphi))
+- cpu = WORK_CPU_UNBOUND;
+- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+- preempt_enable();
++ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+ rnp->exp_need_flush = true;
+ }
+
diff --git a/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch b/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch
index c99209f..8209ab0 100644
--- a/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch
+++ b/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch
@@ -239,7 +239,7 @@
return ret;
}
-@@ -4634,7 +4637,7 @@ static void css_release_work_fn(struct w
+@@ -4641,7 +4644,7 @@ static void css_release_work_fn(struct w
struct cgroup *tcgrp;
/* cgroup release path */
@@ -248,7 +248,7 @@
if (cgroup_on_dfl(cgrp))
cgroup_rstat_flush(cgrp);
-@@ -4977,7 +4980,7 @@ int cgroup_mkdir(struct kernfs_node *par
+@@ -4984,7 +4987,7 @@ int cgroup_mkdir(struct kernfs_node *par
if (ret)
goto out_destroy;
@@ -257,7 +257,7 @@
/* let's create and online css's */
kernfs_activate(kn);
-@@ -5165,9 +5168,8 @@ int cgroup_rmdir(struct kernfs_node *kn)
+@@ -5172,9 +5175,8 @@ int cgroup_rmdir(struct kernfs_node *kn)
return 0;
ret = cgroup_destroy_locked(cgrp);
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index d8e39ed..897fe08 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -52,7 +52,7 @@
/*
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
-@@ -4611,10 +4611,10 @@ static void css_free_rwork_fn(struct wor
+@@ -4618,10 +4618,10 @@ static void css_free_rwork_fn(struct wor
}
}
@@ -65,7 +65,7 @@
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4674,8 +4674,8 @@ static void css_release(struct percpu_re
+@@ -4681,8 +4681,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5397,6 +5397,7 @@ static int __init cgroup_wq_init(void)
+@@ -5404,6 +5404,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 7587954..e585b21 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -151,7 +151,7 @@
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -1864,7 +1864,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1878,7 +1878,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index bc59d9a..bdae2fa 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -27,7 +27,7 @@
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -2176,7 +2176,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -2192,7 +2192,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
@@ -69,7 +69,7 @@
timer->function = perf_mux_hrtimer_handler;
}
-@@ -9170,7 +9170,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -9176,7 +9176,7 @@ static void perf_swevent_init_hrtimer(st
if (!is_sampling_event(event))
return;
diff --git a/patches/iommu-amd-drop-irqs_disabled-warn_on.patch b/patches/iommu-amd-drop-irqs_disabled-warn_on.patch
index 04985a7d..b0eb9d3 100644
--- a/patches/iommu-amd-drop-irqs_disabled-warn_on.patch
+++ b/patches/iommu-amd-drop-irqs_disabled-warn_on.patch
@@ -24,7 +24,7 @@
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
-@@ -1944,12 +1944,6 @@ static int __attach_device(struct iommu_
+@@ -1950,12 +1950,6 @@ static int __attach_device(struct iommu_
{
int ret;
@@ -37,7 +37,7 @@
/* lock domain */
spin_lock(&domain->lock);
-@@ -2115,12 +2109,6 @@ static void __detach_device(struct iommu
+@@ -2121,12 +2115,6 @@ static void __detach_device(struct iommu
{
struct protection_domain *domain;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 68c7b97..02952cd 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt8
++-rt9
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index e0f09df..0929a7f 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5470,6 +5470,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -5492,6 +5492,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -5481,6 +5482,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -5503,6 +5504,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
index a9b11f6..326ce1d 100644
--- a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
+++ b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
@@ -18,7 +18,7 @@
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4252,11 +4252,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4274,11 +4274,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 96ed25e..12a8861 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3242,7 +3242,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3264,7 +3264,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 440c334..d2cc34b 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -36,7 +36,7 @@
/*
* Time (in jiffies) of last Tx
*/
-@@ -2554,14 +2558,53 @@ void netdev_freemem(struct net_device *d
+@@ -2561,14 +2565,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -91,7 +91,7 @@
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -3639,10 +3682,48 @@ static inline u32 netif_msg_init(int deb
+@@ -3646,10 +3689,48 @@ static inline u32 netif_msg_init(int deb
return (1 << debug_value) - 1;
}
@@ -141,7 +141,7 @@
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
-@@ -3659,32 +3740,32 @@ static inline void __netif_tx_release(st
+@@ -3666,32 +3747,32 @@ static inline void __netif_tx_release(st
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
@@ -193,7 +193,7 @@
struct task_struct *oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3314,8 +3314,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3336,8 +3336,10 @@ static void skb_update_prio(struct sk_bu
#define skb_update_prio(skb)
#endif
@@ -204,7 +204,7 @@
/**
* dev_loopback_xmit - loop back @skb
-@@ -3555,9 +3557,12 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3577,9 +3579,12 @@ static int __dev_queue_xmit(struct sk_bu
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
@@ -219,7 +219,7 @@
goto recursion_alert;
skb = validate_xmit_skb(skb, dev, &again);
-@@ -3567,9 +3572,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3589,9 +3594,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -231,7 +231,7 @@
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
-@@ -7858,7 +7863,7 @@ static void netdev_init_one_queue(struct
+@@ -7882,7 +7887,7 @@ static void netdev_init_one_queue(struct
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 24285c8..14f2cee 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5875,7 +5875,7 @@ static __latent_entropy void net_rx_acti
+@@ -5897,7 +5897,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch b/patches/rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch
deleted file mode 100644
index 9401234..0000000
--- a/patches/rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 10 Sep 2018 14:58:37 +0200
-Subject: [PATCH] rcu: Use cpus_read_lock() while looking at cpu_online_mask
-
-It was possible that sync_rcu_exp_select_cpus() enqueued something on
-CPU0 while CPU0 was offline. Such a work item wouldn't be processed
-until CPU0 gets back online. This problem was addressed in commit
-fcc6354365015 ("rcu: Make expedited GPs handle CPU 0 being offline"). I
-don't think the issue fully addressed.
-
-Assume grplo = 0 and grphi = 7 and sync_rcu_exp_select_cpus() is invoked
-on CPU1. The preempt_disable() section on CPU1 won't ensure that CPU0
-remains online between looking at cpu_online_mask and invoking
-queue_work_on() on CPU1.
-
-Use cpus_read_lock() to ensure that `cpu' is not going down between
-looking at cpu_online_mask at invoking queue_work_on() and waiting for
-its completion. It is added around the loop + flush_work() which is
-similar to work_on_cpu_safe() (and we can have multiple jobs running on
-NUMA systems).
-
-Fixes: fcc6354365015 ("rcu: Make expedited GPs handle CPU 0 being
- offline")
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/rcu/tree_exp.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/rcu/tree_exp.h
-+++ b/kernel/rcu/tree_exp.h
-@@ -479,6 +479,7 @@ static void sync_rcu_exp_select_cpus(str
- sync_exp_reset_tree(rsp);
- trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
-
-+ cpus_read_lock();
- /* Schedule work for each leaf rcu_node structure. */
- rcu_for_each_leaf_node(rsp, rnp) {
- rnp->exp_need_flush = false;
-@@ -493,13 +494,11 @@ static void sync_rcu_exp_select_cpus(str
- continue;
- }
- INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-- preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
- /* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
- cpu = WORK_CPU_UNBOUND;
- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
-- preempt_enable();
- rnp->exp_need_flush = true;
- }
-
-@@ -507,6 +506,7 @@ static void sync_rcu_exp_select_cpus(str
- rcu_for_each_leaf_node(rsp, rnp)
- if (rnp->exp_need_flush)
- flush_work(&rnp->rew.rew_work);
-+ cpus_read_unlock();
- }
-
- static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
diff --git a/patches/series b/patches/series
index 1a12179..2485ed7 100644
--- a/patches/series
+++ b/patches/series
@@ -57,7 +57,6 @@
cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
fscache-initialize-cookie-hash-table-raw-spinlocks.patch
Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
-rcu-Use-cpus_read_lock-while-looking-at-cpu_online_m.patch
############################################################
# Ready for posting
@@ -75,6 +74,7 @@
arm64-KVM-compute_layout-before-altenates-are-applie.patch
of-allocate-free-phandle-cache-outside-of-the-devtre.patch
mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch
+EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
###############################################################
# Stuff broken upstream and upstream wants something different
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index d6c3e6a..531e3c3 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2916,6 +2916,7 @@ struct softnet_data {
+@@ -2923,6 +2923,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -65,7 +65,7 @@
#endif
}
-@@ -4809,7 +4809,7 @@ static void flush_backlog(struct work_st
+@@ -4831,7 +4831,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@
input_queue_head_incr(sd);
}
}
-@@ -4819,11 +4819,14 @@ static void flush_backlog(struct work_st
+@@ -4841,11 +4841,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@
}
static void flush_all_backlogs(void)
-@@ -5371,7 +5374,9 @@ static int process_backlog(struct napi_s
+@@ -5393,7 +5396,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5379,9 +5384,9 @@ static int process_backlog(struct napi_s
+@@ -5401,9 +5406,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5821,13 +5826,21 @@ static __latent_entropy void net_rx_acti
+@@ -5843,13 +5848,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -133,7 +133,7 @@
for (;;) {
struct napi_struct *n;
-@@ -8790,10 +8803,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -8814,10 +8827,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -148,7 +148,7 @@
return 0;
}
-@@ -9099,8 +9115,9 @@ static int __init net_dev_init(void)
+@@ -9123,8 +9139,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index b1e5cb1..d422971 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -111,7 +111,7 @@
}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2505,6 +2505,7 @@ static void __netif_reschedule(struct Qd
+@@ -2527,6 +2527,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -119,7 +119,7 @@
}
void __netif_schedule(struct Qdisc *q)
-@@ -2567,6 +2568,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2589,6 +2590,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -127,7 +127,7 @@
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3986,6 +3988,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4008,6 +4010,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -135,7 +135,7 @@
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5333,12 +5336,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5355,12 +5358,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -150,7 +150,7 @@
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5416,6 +5421,7 @@ void __napi_schedule(struct napi_struct
+@@ -5438,6 +5443,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -158,7 +158,7 @@
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8772,6 +8778,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8796,6 +8802,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 50de2f5..beea5a4 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4210,7 +4210,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4232,7 +4232,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4220,14 +4220,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4242,14 +4242,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();