| From c1dc509be2d2ff1dfe15d814c3b9df30d5e69f79 Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Tue, 6 Apr 2010 16:51:31 +0200 |
| Subject: [PATCH] md: raid5: Make raid5_percpu handling RT aware |
| |
| raid_run_ops() disables preemption with get_cpu() around the access |
| to the raid5_percpu variables. That causes scheduling while atomic |
| spews on RT. |
| |
| Serialize the access to the percpu data with a lock and keep the code |
| preemptible. |
| |
| Reported-by: Udo van den Heuvel <udovdh@xs4all.nl> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Tested-by: Udo van den Heuvel <udovdh@xs4all.nl> |
| |
| diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
| index da583bb43c84..b377a9c51ec2 100644 |
| --- a/drivers/md/raid5.c |
| +++ b/drivers/md/raid5.c |
| @@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
| struct raid5_percpu *percpu; |
| unsigned long cpu; |
| |
| - cpu = get_cpu(); |
| + cpu = get_cpu_light(); |
| percpu = per_cpu_ptr(conf->percpu, cpu); |
| + spin_lock(&percpu->lock); |
| if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { |
| ops_run_biofill(sh); |
| overlap_clear++; |
| @@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
| if (test_and_clear_bit(R5_Overlap, &dev->flags)) |
| wake_up(&sh->raid_conf->wait_for_overlap); |
| } |
| - put_cpu(); |
| + spin_unlock(&percpu->lock); |
| + put_cpu_light(); |
| } |
| |
| static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, |
| @@ -6438,6 +6440,7 @@ static int raid5_alloc_percpu(struct r5conf *conf) |
| __func__, cpu); |
| break; |
| } |
| + spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); |
| } |
| put_online_cpus(); |
| |
| diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h |
| index 517d4b68a1be..efe91887ecd7 100644 |
| --- a/drivers/md/raid5.h |
| +++ b/drivers/md/raid5.h |
| @@ -504,6 +504,7 @@ struct r5conf { |
| int recovery_disabled; |
| /* per cpu variables */ |
| struct raid5_percpu { |
| + spinlock_t lock; /* Protection for -RT */ |
| struct page *spare_page; /* Used when checking P/Q in raid6 */ |
| struct flex_array *scribble; /* space for constructing buffer |
| * lists and performing address |
| -- |
| 2.5.0 |
| |