From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sun, 2 Nov 2014 08:31:37 +0100
Subject: x86: UV: raw_spinlock conversion

Shrug. Lots of hobbyists have a beast in their basement, right?

On PREEMPT_RT, spinlock_t becomes a sleeping lock, but the UV BAU
locks are taken in the TLB shootdown path and the RTC timer-head lock
is taken while programming clock events, i.e. in contexts that must
not sleep. Convert them to raw_spinlock_t, and disable preemption
across the per-cpu offset calculation and MMR access in uv_read_rtc().

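For illustration only (not part of the change; the names below are
made up): on PREEMPT_RT, spinlock_t is backed by a sleeping lock,
while raw_spinlock_t keeps the classic spin-with-preemption-disabled
semantics that non-sleepable contexts require:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sleepy_lock);		/* sleeping lock on RT */
static DEFINE_RAW_SPINLOCK(atomic_lock);	/* spins on all kernels */

/* Fine in preemptible task context on any kernel. */
static void in_task_context(void)
{
	spin_lock(&sleepy_lock);	/* may sleep on RT; harmless here */
	spin_unlock(&sleepy_lock);
}

/* Runs with interrupts off, like the TLB shootdown paths below. */
static void in_atomic_context(void)
{
	raw_spin_lock(&atomic_lock);	/* never sleeps, safe here */
	/*
	 * spin_lock(&sleepy_lock) at this point would be a sleeping
	 * call from invalid context on a PREEMPT_RT kernel.
	 */
	raw_spin_unlock(&atomic_lock);
}
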
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/include/asm/uv/uv_bau.h |   14 +++++++-------
 arch/x86/platform/uv/tlb_uv.c    |   26 +++++++++++++-------------
 arch/x86/platform/uv/uv_time.c   |   20 ++++++++++++--------
 3 files changed, 32 insertions(+), 28 deletions(-)

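Two notes on the uv_time.c hunks below, sketched with made-up
identifiers (mmr_read() is fictitious). First, the timer-head lock
keeps its _irqsave variants: raw_spin_lock_irqsave() still disables
hard interrupts on RT, which the clockevent programming paths rely
on. Second, uv_read_rtc() now brackets the read with
preempt_disable() so the cpu-id based offset computation and the
local MMR access cannot be split by a migration:

#include <linux/cache.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/page.h>

extern u64 mmr_read(unsigned long offset);	/* fictitious accessor */

static DEFINE_RAW_SPINLOCK(head_lock);	/* stands in for head->lock */

/* Expiry bookkeeping that must be hard-irq safe, also on RT. */
static void set_next_expiry(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&head_lock, flags);
	/* ... update next_cpu / expiry slots ... */
	raw_spin_unlock_irqrestore(&head_lock, flags);
}

/* Keep the cpu-id lookup and the access it selects on one cpu. */
static u64 read_local_counter(void)
{
	unsigned long offset;
	u64 val;

	preempt_disable();
	offset = (smp_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
	val = mmr_read(offset);
	preempt_enable();
	return val;
}
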
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -624,9 +624,9 @@ struct bau_control {
 	cycles_t send_message;
 	cycles_t period_end;
 	cycles_t period_time;
-	spinlock_t uvhub_lock;
-	spinlock_t queue_lock;
-	spinlock_t disable_lock;
+	raw_spinlock_t uvhub_lock;
+	raw_spinlock_t queue_lock;
+	raw_spinlock_t disable_lock;
 	/* tunables */
 	int max_concurr;
 	int max_concurr_const;
@@ -815,15 +815,15 @@ static inline int atom_asr(short i, stru
  * to be lowered below the current 'v'. atomic_add_unless can only stop
  * on equal.
  */
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
 {
-	spin_lock(lock);
+	raw_spin_lock(lock);
 	if (atomic_read(v) >= u) {
-		spin_unlock(lock);
+		raw_spin_unlock(lock);
 		return 0;
 	}
 	atomic_inc(v);
-	spin_unlock(lock);
+	raw_spin_unlock(lock);
 	return 1;
 }

--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -747,9 +747,9 @@ static void destination_plugged(struct b

 		quiesce_local_uvhub(hmaster);

-		spin_lock(&hmaster->queue_lock);
+		raw_spin_lock(&hmaster->queue_lock);
 		reset_with_ipi(&bau_desc->distribution, bcp);
-		spin_unlock(&hmaster->queue_lock);
+		raw_spin_unlock(&hmaster->queue_lock);

 		end_uvhub_quiesce(hmaster);

@@ -769,9 +769,9 @@ static void destination_timeout(struct b

 		quiesce_local_uvhub(hmaster);

-		spin_lock(&hmaster->queue_lock);
+		raw_spin_lock(&hmaster->queue_lock);
 		reset_with_ipi(&bau_desc->distribution, bcp);
-		spin_unlock(&hmaster->queue_lock);
+		raw_spin_unlock(&hmaster->queue_lock);

 		end_uvhub_quiesce(hmaster);

@@ -792,7 +792,7 @@ static void disable_for_period(struct ba
 	cycles_t tm1;

 	hmaster = bcp->uvhub_master;
-	spin_lock(&hmaster->disable_lock);
+	raw_spin_lock(&hmaster->disable_lock);
 	if (!bcp->baudisabled) {
 		stat->s_bau_disabled++;
 		tm1 = get_cycles();
@@ -805,7 +805,7 @@ static void disable_for_period(struct ba
 			}
 		}
 	}
-	spin_unlock(&hmaster->disable_lock);
+	raw_spin_unlock(&hmaster->disable_lock);
 }

 static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -868,7 +868,7 @@ static void record_send_stats(cycles_t t
  */
 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
 {
-	spinlock_t *lock = &hmaster->uvhub_lock;
+	raw_spinlock_t *lock = &hmaster->uvhub_lock;
 	atomic_t *v;

 	v = &hmaster->active_descriptor_count;
@@ -1001,7 +1001,7 @@ static int check_enable(struct bau_contr
 	struct bau_control *hmaster;

 	hmaster = bcp->uvhub_master;
-	spin_lock(&hmaster->disable_lock);
+	raw_spin_lock(&hmaster->disable_lock);
 	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
 		stat->s_bau_reenabled++;
 		for_each_present_cpu(tcpu) {
@@ -1013,10 +1013,10 @@ static int check_enable(struct bau_contr
 				tbcp->period_giveups = 0;
 			}
 		}
-		spin_unlock(&hmaster->disable_lock);
+		raw_spin_unlock(&hmaster->disable_lock);
 		return 0;
 	}
-	spin_unlock(&hmaster->disable_lock);
+	raw_spin_unlock(&hmaster->disable_lock);
 	return -1;
 }

@@ -1938,9 +1938,9 @@ static void __init init_per_cpu_tunables
 		bcp->cong_reps = congested_reps;
 		bcp->disabled_period = sec_2_cycles(disabled_period);
 		bcp->giveup_limit = giveup_limit;
-		spin_lock_init(&bcp->queue_lock);
-		spin_lock_init(&bcp->uvhub_lock);
-		spin_lock_init(&bcp->disable_lock);
+		raw_spin_lock_init(&bcp->queue_lock);
+		raw_spin_lock_init(&bcp->uvhub_lock);
+		raw_spin_lock_init(&bcp->disable_lock);
 	}
 }

--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event

 /* There is one of these allocated per node */
 struct uv_rtc_timer_head {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	/* next cpu waiting for timer, local node relative: */
 	int next_cpu;
 	/* number of cpus on this node: */
@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers
 			uv_rtc_deallocate_timers();
 			return -ENOMEM;
 		}
-		spin_lock_init(&head->lock);
+		raw_spin_lock_init(&head->lock);
 		head->ncpus = uv_blade_nr_possible_cpus(bid);
 		head->next_cpu = -1;
 		blade_info[bid] = head;
@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64
 	unsigned long flags;
 	int next_cpu;

-	spin_lock_irqsave(&head->lock, flags);
+	raw_spin_lock_irqsave(&head->lock, flags);

 	next_cpu = head->next_cpu;
 	*t = expires;
@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64
 		if (uv_setup_intr(cpu, expires)) {
 			*t = ULLONG_MAX;
 			uv_rtc_find_next_timer(head, pnode);
-			spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock_irqrestore(&head->lock, flags);
 			return -ETIME;
 		}
 	}

-	spin_unlock_irqrestore(&head->lock, flags);
+	raw_spin_unlock_irqrestore(&head->lock, flags);
 	return 0;
 }

@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, i
 	unsigned long flags;
 	int rc = 0;

-	spin_lock_irqsave(&head->lock, flags);
+	raw_spin_lock_irqsave(&head->lock, flags);

 	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
 		rc = 1;
@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, i
 		uv_rtc_find_next_timer(head, pnode);
 	}

-	spin_unlock_irqrestore(&head->lock, flags);
+	raw_spin_unlock_irqrestore(&head->lock, flags);

 	return rc;
 }
@@ -299,13 +299,17 @@ static int uv_rtc_unset_timer(int cpu, i
 static u64 uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;
+	u64 cycles;

+	preempt_disable();
 	if (uv_get_min_hub_revision_id() == 1)
 		offset = 0;
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

-	return (u64)uv_read_local_mmr(UVH_RTC | offset);
+	cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
+	preempt_enable();
+	return cycles;
 }

 /*