From: Shakeel Butt <shakeelb@google.com>
Date: Thu, 3 Nov 2022 06:05:13 +0000
Subject: mm: percpu_counter: use race free percpu_counter sum interface
percpu_counter_sum() can race with CPU offlining: a CPU can drop out of cpu_online_mask before its per-cpu count has been folded into fbc->count, so a concurrent sum misses that CPU's contribution. Add a new interface, percpu_counter_sum_all(), which iterates over all possible CPUs rather than only the online ones, and use it in check_mm(), where a missed count would otherwise trigger a spurious "BUG: Bad rss-counter state" report.
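
To illustrate the intended usage, here is a minimal sketch (the nr_widgets
counter is hypothetical and not part of this patch):

	static struct percpu_counter nr_widgets;

	/* Fast path: the update lands in the local CPU's slot. */
	percpu_counter_add(&nr_widgets, 1);

	/*
	 * percpu_counter_sum() walks cpu_online_mask and can miss a CPU
	 * that is concurrently going offline.  percpu_counter_sum_all()
	 * walks cpu_possible_mask, which is fixed at boot, so it is
	 * slower but immune to that race; use it where an exact answer
	 * matters more than speed, e.g. in teardown sanity checks.
	 */
	s64 exact = percpu_counter_sum_all(&nr_widgets);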
Link: https://lkml.kernel.org/r/20221103171407.ydubp43x7tzahriq@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/percpu_counter.h |   11 +++++++++++
 kernel/fork.c                  |    2 +-
 lib/percpu_counter.c           |   24 ++++++++++++++++++------
 3 files changed, 30 insertions(+), 7 deletions(-)
--- a/include/linux/percpu_counter.h~mm-convert-mms-rss-stats-into-percpu_counter-fix
+++ a/include/linux/percpu_counter.h
@@ -45,6 +45,7 @@ void percpu_counter_set(struct percpu_co
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
 
@@ -85,6 +86,11 @@ static inline s64 percpu_counter_sum(str
 	return __percpu_counter_sum(fbc);
 }
 
+static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_all(fbc);
+}
+
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 {
 	return fbc->count;
@@ -192,6 +198,11 @@ static inline s64 percpu_counter_sum(str
 {
 	return percpu_counter_read(fbc);
 }
+
+static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+	return percpu_counter_read(fbc);
+}
 
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
--- a/kernel/fork.c~mm-convert-mms-rss-stats-into-percpu_counter-fix
+++ a/kernel/fork.c
@@ -753,7 +753,7 @@ static void check_mm(struct mm_struct *m
"Please make sure 'struct resident_page_types[]' is updated as well");
for (i = 0; i < NR_MM_COUNTERS; i++) {
- long x = percpu_counter_sum(&mm->rss_stat[i]);
+ long x = percpu_counter_sum_all(&mm->rss_stat[i]);
if (unlikely(x))
pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
--- a/lib/percpu_counter.c~mm-convert-mms-rss-stats-into-percpu_counter-fix
+++ a/lib/percpu_counter.c
@@ -117,11 +117,8 @@ void percpu_counter_sync(struct percpu_c
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
+			      const struct cpumask *cpu_mask)
 {
 	s64 ret;
 	int cpu;
@@ -129,15 +126,30 @@ s64 __percpu_counter_sum(struct percpu_c
 
 	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, cpu_mask) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
 	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_mask(fbc, cpu_online_mask);
+}
 EXPORT_SYMBOL(__percpu_counter_sum);
 
+s64 __percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
+}
+EXPORT_SYMBOL(__percpu_counter_sum_all);
+
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key)
 {
_