From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
Disable preemption on -RT for the vmstat code. On vanilla the code runs in
IRQ-off regions while on -RT it does not. preempt_disable() ensures that the
same resources are not updated in parallel due to preemption.
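
For reference, preempt_disable_rt()/preempt_enable_rt() are provided by the
-RT preempt.h changes, not by this patch. A sketch of what they are expected
to look like, assuming the usual CONFIG_PREEMPT_RT_FULL companion patch:

  #ifdef CONFIG_PREEMPT_RT_FULL
  /* RT: callers can be preempted, so really disable preemption */
  # define preempt_disable_rt()	preempt_disable()
  # define preempt_enable_rt()	preempt_enable()
  #else
  /* non-RT: callers already run in IRQ-off/preempt-off regions, keep a no-op */
  # define preempt_disable_rt()	barrier()
  # define preempt_enable_rt()	barrier()
  #endif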
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/vmstat.h | 4 ++++
mm/vmstat.c | 6 ++++++
2 files changed, 10 insertions(+)
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, v
*/
static inline void __count_vm_event(enum vm_event_item item)
{
+ preempt_disable_rt();
raw_cpu_inc(vm_event_states.event[item]);
+ preempt_enable_rt();
}
static inline void count_vm_event(enum vm_event_item item)
@@ -43,7 +45,9 @@ static inline void count_vm_event(enum v
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
+ preempt_disable_rt();
raw_cpu_add(vm_event_states.event[item], delta);
+ preempt_enable_rt();
}
static inline void count_vm_events(enum vm_event_item item, long delta)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -226,6 +226,7 @@ void __mod_zone_page_state(struct zone *
long x;
long t;
+ preempt_disable_rt();
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
@@ -235,6 +236,7 @@ void __mod_zone_page_state(struct zone *
x = 0;
}
__this_cpu_write(*p, x);
+ preempt_enable_rt();
}
EXPORT_SYMBOL(__mod_zone_page_state);
@@ -267,6 +269,7 @@ void __inc_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
+ preempt_disable_rt();
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
@@ -275,6 +278,7 @@ void __inc_zone_state(struct zone *zone,
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
+ preempt_enable_rt();
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -289,6 +293,7 @@ void __dec_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
+ preempt_disable_rt();
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
@@ -297,6 +302,7 @@ void __dec_zone_state(struct zone *zone,
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
+ preempt_enable_rt();
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)