From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Feb 2020 18:51:43 +0100
Subject: [PATCH] rcu: Use a raw_spinlock_t for kfree batching

The per-CPU kfree_rcu batching lock is acquired in kfree_call_rcu()
after interrupts have been disabled via local_irq_save(). On PREEMPT_RT
a spinlock_t is a sleeping lock and must not be acquired with
interrupts disabled. Make the lock a raw_spinlock_t and use the
matching raw_spin_lock*()/raw_spin_unlock*() accessors throughout.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
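A minimal sketch (illustration only, not part of the patch): on
PREEMPT_RT, spinlock_t is backed by a sleeping rtmutex, while
raw_spinlock_t always busy-waits, so only the latter may be acquired
with interrupts already disabled. The lock and function names below are
hypothetical:

	#include <linux/spinlock.h>
	#include <linux/irqflags.h>

	/* hypothetical stand-in for the per-CPU krcp->lock */
	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_queue(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* IRQs off, as in kfree_call_rcu() */
		raw_spin_lock(&example_lock);	/* OK on RT: raw locks never sleep */
		/* ... queue an object under the lock ... */
		raw_spin_unlock(&example_lock);
		local_irq_restore(flags);
	}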
kernel/rcu/tree.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2719,7 +2719,7 @@ struct kfree_rcu_cpu_work {
struct kfree_rcu_cpu {
struct rcu_head *head;
struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
- spinlock_t lock;
+ raw_spinlock_t lock;
struct delayed_work monitor_work;
bool monitor_todo;
bool initialized;
@@ -2741,10 +2741,10 @@ static void kfree_rcu_work(struct work_s
krwp = container_of(to_rcu_work(work),
struct kfree_rcu_cpu_work, rcu_work);
krcp = krwp->krcp;
- spin_lock_irqsave(&krcp->lock, flags);
+ raw_spin_lock_irqsave(&krcp->lock, flags);
head = krwp->head_free;
krwp->head_free = NULL;
- spin_unlock_irqrestore(&krcp->lock, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);

// List "head" is now private, so traverse locklessly.
for (; head; head = next) {
@@ -2803,14 +2803,14 @@ static inline void kfree_rcu_drain_unloc
krcp->monitor_todo = false;
if (queue_kfree_rcu_work(krcp)) {
// Success! Our job is done here.
- spin_unlock_irqrestore(&krcp->lock, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
return;
}

// Previous RCU batch still in progress, try again later.
krcp->monitor_todo = true;
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
- spin_unlock_irqrestore(&krcp->lock, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
}

/*
@@ -2823,11 +2823,11 @@ static void kfree_rcu_monitor(struct wor
struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
monitor_work.work);

- spin_lock_irqsave(&krcp->lock, flags);
+ raw_spin_lock_irqsave(&krcp->lock, flags);
if (krcp->monitor_todo)
kfree_rcu_drain_unlock(krcp, flags);
else
- spin_unlock_irqrestore(&krcp->lock, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
}

/*
@@ -2852,7 +2852,7 @@ void kfree_call_rcu(struct rcu_head *hea
local_irq_save(flags); // For safely calling this_cpu_ptr().
krcp = this_cpu_ptr(&krc);
if (krcp->initialized)
- spin_lock(&krcp->lock);
+ raw_spin_lock(&krcp->lock);

// Queue the object but don't yet schedule the batch.
if (debug_rcu_head_queue(head)) {
@@ -2874,7 +2874,7 @@ void kfree_call_rcu(struct rcu_head *hea

unlock_return:
if (krcp->initialized)
- spin_unlock(&krcp->lock);
+ raw_spin_unlock(&krcp->lock);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
@@ -2887,15 +2887,15 @@ void __init kfree_rcu_scheduler_running(
for_each_online_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

- spin_lock_irqsave(&krcp->lock, flags);
+ raw_spin_lock_irqsave(&krcp->lock, flags);
if (!krcp->head || krcp->monitor_todo) {
- spin_unlock_irqrestore(&krcp->lock, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
continue;
}
krcp->monitor_todo = true;
schedule_delayed_work_on(cpu, &krcp->monitor_work,
KFREE_DRAIN_JIFFIES);
- spin_unlock_irqrestore(&krcp->lock, flags);
+ raw_spin_unlock_irqrestore(&krcp->lock, flags);
}
}

@@ -3780,7 +3780,7 @@ static void __init kfree_rcu_batch_init(
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

- spin_lock_init(&krcp->lock);
+ raw_spin_lock_init(&krcp->lock);
for (i = 0; i < KFREE_N_BATCHES; i++)
krcp->krw_arr[i].krcp = krcp;
INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);