| From: Ingo Molnar <mingo@elte.hu> |
| Date: Fri, 3 Jul 2009 08:30:13 -0500 |
| Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT |
| |
| Disable preemption on -RT for the vmstat code. On vanilla the code runs in |
| IRQ-off regions while on -RT it is not. preempt_disable_rt() ensures that the |
| same resources are not updated in parallel due to preemption. |
| |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| --- |
| include/linux/vmstat.h | 4 ++++ |
| mm/vmstat.c | 12 ++++++++++++ |
| 2 files changed, 16 insertions(+) |
| |
| --- a/include/linux/vmstat.h |
| +++ b/include/linux/vmstat.h |
| @@ -60,7 +60,9 @@ DECLARE_PER_CPU(struct vm_event_state, v |
| */ |
| static inline void __count_vm_event(enum vm_event_item item) |
| { |
| + preempt_disable_rt(); |
| raw_cpu_inc(vm_event_states.event[item]); |
| + preempt_enable_rt(); |
| } |
| |
| static inline void count_vm_event(enum vm_event_item item) |
| @@ -70,7 +72,9 @@ static inline void count_vm_event(enum v |
| |
| static inline void __count_vm_events(enum vm_event_item item, long delta) |
| { |
| + preempt_disable_rt(); |
| raw_cpu_add(vm_event_states.event[item], delta); |
| + preempt_enable_rt(); |
| } |
| |
| static inline void count_vm_events(enum vm_event_item item, long delta) |
| --- a/mm/vmstat.c |
| +++ b/mm/vmstat.c |
| @@ -321,6 +321,7 @@ void __mod_zone_page_state(struct zone * |
| long x; |
| long t; |
| |
| + preempt_disable_rt(); |
| x = delta + __this_cpu_read(*p); |
| |
| t = __this_cpu_read(pcp->stat_threshold); |
| @@ -330,6 +331,7 @@ void __mod_zone_page_state(struct zone * |
| x = 0; |
| } |
| __this_cpu_write(*p, x); |
| + preempt_enable_rt(); |
| } |
| EXPORT_SYMBOL(__mod_zone_page_state); |
| |
| @@ -341,6 +343,7 @@ void __mod_node_page_state(struct pglist |
| long x; |
| long t; |
| |
| + preempt_disable_rt(); |
| x = delta + __this_cpu_read(*p); |
| |
| t = __this_cpu_read(pcp->stat_threshold); |
| @@ -350,6 +353,7 @@ void __mod_node_page_state(struct pglist |
| x = 0; |
| } |
| __this_cpu_write(*p, x); |
| + preempt_enable_rt(); |
| } |
| EXPORT_SYMBOL(__mod_node_page_state); |
| |
| @@ -382,6 +386,7 @@ void __inc_zone_state(struct zone *zone, |
| s8 __percpu *p = pcp->vm_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_inc_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v > t)) { |
| @@ -390,6 +395,7 @@ void __inc_zone_state(struct zone *zone, |
| zone_page_state_add(v + overstep, zone, item); |
| __this_cpu_write(*p, -overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) |
| @@ -398,6 +404,7 @@ void __inc_node_state(struct pglist_data |
| s8 __percpu *p = pcp->vm_node_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_inc_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v > t)) { |
| @@ -406,6 +413,7 @@ void __inc_node_state(struct pglist_data |
| node_page_state_add(v + overstep, pgdat, item); |
| __this_cpu_write(*p, -overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __inc_zone_page_state(struct page *page, enum zone_stat_item item) |
| @@ -426,6 +434,7 @@ void __dec_zone_state(struct zone *zone, |
| s8 __percpu *p = pcp->vm_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_dec_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v < - t)) { |
| @@ -434,6 +443,7 @@ void __dec_zone_state(struct zone *zone, |
| zone_page_state_add(v - overstep, zone, item); |
| __this_cpu_write(*p, overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) |
| @@ -442,6 +452,7 @@ void __dec_node_state(struct pglist_data |
| s8 __percpu *p = pcp->vm_node_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_dec_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v < - t)) { |
| @@ -450,6 +461,7 @@ void __dec_node_state(struct pglist_data |
| node_page_state_add(v - overstep, pgdat, item); |
| __this_cpu_write(*p, overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __dec_zone_page_state(struct page *page, enum zone_stat_item item) |