From b825dd15fe085ff18e8fb99dc36c79e8c0a8f702 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jul 2009 17:13:33 +0200
Subject: [PATCH] trace: Convert various locks to raw_spinlock

commit 87654a70523a8c5baadcbbc07d80cbae8f912837 in tip.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
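---

These conversions matter on PREEMPT_RT, where spinlock_t is substituted
by a sleeping lock: reader_lock, tracing_start_lock and max_trace_lock
are all taken with interrupts disabled, so they must remain true
spinning locks on every configuration, which is what raw_spinlock_t
guarantees. Below is a minimal sketch of the resulting locking pattern;
example_lock and example_critical_section are hypothetical names, and
nothing in the sketch is taken from the patch itself:

	/* Hypothetical lock that must stay spinning even on PREEMPT_RT */
	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_critical_section(void)
	{
		unsigned long flags;

		/* Disables interrupts and spins; never sleeps */
		raw_spin_lock_irqsave(&example_lock, flags);
		/* ... work that must not sleep ... */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}

On !PREEMPT_RT kernels the raw_spin_* operations behave like their
spin_* counterparts, so the conversion is essentially a functional
no-op for mainline configurations; the two lock types only diverge
once RT turns spinlock_t into a sleeping lock.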

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05a9f83..1a926e8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
-	spinlock_t			reader_lock;	/* serialize readers */
+	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
@@ -999,7 +999,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
@@ -2740,9 +2740,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 
 	cpu_buffer = iter->cpu_buffer;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 
@@ -3176,12 +3176,12 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
@@ -3206,9 +3206,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;
 
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		goto again;
@@ -3244,14 +3244,14 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(cpu_buffer, ts);
 	if (event)
 		rb_advance_reader(cpu_buffer);
 
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
  out:
@@ -3297,11 +3297,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	arch_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
 }
@@ -3338,7 +3338,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  again:
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -3349,7 +3349,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 
 	rb_advance_iter(iter);
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return event;
 }
@@ -3415,7 +3415,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 
 	atomic_inc(&cpu_buffer->record_disabled);
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
@@ -3427,7 +3427,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -3465,10 +3465,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	if (!ret)
@@ -3499,10 +3499,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		raw_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		raw_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -3697,7 +3697,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;
 
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3772,7 +3772,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	ret = read;
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
  out:
 	return ret;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ec2ee6..9058eab 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -335,7 +335,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME;
 
 static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -940,7 +940,7 @@ void tracing_start(void)
 	if (tracing_disabled)
 		return;
 
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (--trace_stop_count) {
 		if (trace_stop_count < 0) {
 			/* Someone screwed up their debugging */
@@ -965,7 +965,7 @@ void tracing_start(void)
 
 	ftrace_start();
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 /**
@@ -980,7 +980,7 @@ void tracing_stop(void)
 	unsigned long flags;
 
 	ftrace_stop();
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (trace_stop_count++)
 		goto out;
 
@@ -998,7 +998,7 @@ void tracing_stop(void)
 	arch_spin_unlock(&ftrace_max_lock);
 
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
 
 void trace_stop_cmdline_recording(void);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2974bc7..60ba58e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -144,7 +144,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	spin_lock_irqsave(&max_trace_lock, flags);
+	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
		goto out_unlock;
@@ -167,7 +167,7 @@ check_critical_timing(struct trace_array *tr,
 	max_sequence++;
 
 out_unlock:
-	spin_unlock_irqrestore(&max_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;
-- 
1.7.0.4