| From: Ingo Molnar <mingo@elte.hu> |
| Date: Fri, 3 Jul 2009 08:30:13 -0500 |
| Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT |
| |
| Disable preemption on -RT for the vmstat code. On vanilla the code runs in |
| IRQ-off regions while on -RT it does not. "preempt_disable" ensures that the |
| same resource is not updated in parallel due to preemption. |
| |
| Signed-off-by: Ingo Molnar <mingo@elte.hu> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| --- |
| include/linux/vmstat.h | 4 ++++ |
| mm/vmstat.c | 12 ++++++++++++ |
| 2 files changed, 16 insertions(+) |
| |
| --- a/include/linux/vmstat.h |
| +++ b/include/linux/vmstat.h |
| @@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, v |
| */ |
| static inline void __count_vm_event(enum vm_event_item item) |
| { |
| + preempt_disable_rt(); |
| raw_cpu_inc(vm_event_states.event[item]); |
| + preempt_enable_rt(); |
| } |
| |
| static inline void count_vm_event(enum vm_event_item item) |
| @@ -43,7 +45,9 @@ static inline void count_vm_event(enum v |
| |
| static inline void __count_vm_events(enum vm_event_item item, long delta) |
| { |
| + preempt_disable_rt(); |
| raw_cpu_add(vm_event_states.event[item], delta); |
| + preempt_enable_rt(); |
| } |
| |
| static inline void count_vm_events(enum vm_event_item item, long delta) |
| --- a/mm/vmstat.c |
| +++ b/mm/vmstat.c |
| @@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone * |
| long x; |
| long t; |
| |
| + preempt_disable_rt(); |
| x = delta + __this_cpu_read(*p); |
| |
| t = __this_cpu_read(pcp->stat_threshold); |
| @@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone * |
| x = 0; |
| } |
| __this_cpu_write(*p, x); |
| + preempt_enable_rt(); |
| } |
| EXPORT_SYMBOL(__mod_zone_page_state); |
| |
| @@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist |
| long x; |
| long t; |
| |
| + preempt_disable_rt(); |
| x = delta + __this_cpu_read(*p); |
| |
| t = __this_cpu_read(pcp->stat_threshold); |
| @@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist |
| x = 0; |
| } |
| __this_cpu_write(*p, x); |
| + preempt_enable_rt(); |
| } |
| EXPORT_SYMBOL(__mod_node_page_state); |
| |
| @@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, |
| s8 __percpu *p = pcp->vm_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_inc_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v > t)) { |
| @@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, |
| zone_page_state_add(v + overstep, zone, item); |
| __this_cpu_write(*p, -overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) |
| @@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data |
| s8 __percpu *p = pcp->vm_node_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_inc_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v > t)) { |
| @@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data |
| node_page_state_add(v + overstep, pgdat, item); |
| __this_cpu_write(*p, -overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __inc_zone_page_state(struct page *page, enum zone_stat_item item) |
| @@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, |
| s8 __percpu *p = pcp->vm_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_dec_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v < - t)) { |
| @@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, |
| zone_page_state_add(v - overstep, zone, item); |
| __this_cpu_write(*p, overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) |
| @@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data |
| s8 __percpu *p = pcp->vm_node_stat_diff + item; |
| s8 v, t; |
| |
| + preempt_disable_rt(); |
| v = __this_cpu_dec_return(*p); |
| t = __this_cpu_read(pcp->stat_threshold); |
| if (unlikely(v < - t)) { |
| @@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data |
| node_page_state_add(v - overstep, pgdat, item); |
| __this_cpu_write(*p, overstep); |
| } |
| + preempt_enable_rt(); |
| } |
| |
| void __dec_zone_page_state(struct page *page, enum zone_stat_item item) |