| From 5231dbb6b0f74d1cf7b22094b170e6a105e13ea8 Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Sat, 25 Jul 2009 16:43:30 +0200 |
| Subject: [PATCH] proportions: Convert to raw_spinlock |
| |
| commit 0fc7741cfd53c5c5ca710e075e05808e1bf9be71 in tip. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/include/linux/proportions.h b/include/linux/proportions.h |
| index cf793bb..ef35bb7 100644 |
| --- a/include/linux/proportions.h |
| +++ b/include/linux/proportions.h |
| @@ -58,7 +58,7 @@ struct prop_local_percpu { |
| */ |
| int shift; |
| unsigned long period; |
| - spinlock_t lock; /* protect the snapshot state */ |
| + raw_spinlock_t lock; /* protect the snapshot state */ |
| }; |
| |
| int prop_local_init_percpu(struct prop_local_percpu *pl); |
| @@ -106,11 +106,11 @@ struct prop_local_single { |
| */ |
| unsigned long period; |
| int shift; |
| - spinlock_t lock; /* protect the snapshot state */ |
| + raw_spinlock_t lock; /* protect the snapshot state */ |
| }; |
| |
| #define INIT_PROP_LOCAL_SINGLE(name) \ |
| -{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
| +{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ |
| } |
| |
| int prop_local_init_single(struct prop_local_single *pl); |
| diff --git a/lib/proportions.c b/lib/proportions.c |
| index d50746a..05df848 100644 |
| --- a/lib/proportions.c |
| +++ b/lib/proportions.c |
| @@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift) |
| |
| int prop_local_init_percpu(struct prop_local_percpu *pl) |
| { |
| - spin_lock_init(&pl->lock); |
| + raw_spin_lock_init(&pl->lock); |
| pl->shift = 0; |
| pl->period = 0; |
| return percpu_counter_init(&pl->events, 0); |
| @@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) |
| if (pl->period == global_period) |
| return; |
| |
| - spin_lock_irqsave(&pl->lock, flags); |
| + raw_spin_lock_irqsave(&pl->lock, flags); |
| prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
| |
| /* |
| @@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) |
| percpu_counter_set(&pl->events, 0); |
| |
| pl->period = global_period; |
| - spin_unlock_irqrestore(&pl->lock, flags); |
| + raw_spin_unlock_irqrestore(&pl->lock, flags); |
| } |
| |
| /* |
| @@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd, |
| |
| int prop_local_init_single(struct prop_local_single *pl) |
| { |
| - spin_lock_init(&pl->lock); |
| + raw_spin_lock_init(&pl->lock); |
| pl->shift = 0; |
| pl->period = 0; |
| pl->events = 0; |
| @@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) |
| if (pl->period == global_period) |
| return; |
| |
| - spin_lock_irqsave(&pl->lock, flags); |
| + raw_spin_lock_irqsave(&pl->lock, flags); |
| prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
| /* |
| * For each missed period, we half the local counter. |
| @@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) |
| else |
| pl->events = 0; |
| pl->period = global_period; |
| - spin_unlock_irqrestore(&pl->lock, flags); |
| + raw_spin_unlock_irqrestore(&pl->lock, flags); |
| } |
| |
| /* |
| -- |
| 1.7.1.1 |
| |