From 671165384695671dc526ad9a1c37fd38904e6475 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Mar 2012 15:14:06 +0100
Subject: [PATCH 012/274] timekeeping: Split xtime_lock
xtime_lock is going to be split apart in mainline, so we can shorten
the seqcount-protected regions and avoid updating the seqcount in some
code paths. This is a straightforward split, so we can avoid the
whole mess with raw seqlocks for RT.
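The conversion follows the pattern sketched below (a minimal illustration
only; example_lock, example_seq, example_value and the example_* helpers
are made-up names, not part of this patch): writers take the raw spinlock
for mutual exclusion and bracket their update with the seqcount, while
readers only loop on the seqcount and never touch the lock.

  static DEFINE_RAW_SPINLOCK(example_lock);
  static seqcount_t example_seq;
  static u64 example_value;

  /* Writer: serialize on the raw spinlock, publish via the seqcount */
  static void example_update(u64 val)
  {
          raw_spin_lock(&example_lock);
          write_seqcount_begin(&example_seq);
          example_value = val;
          write_seqcount_end(&example_seq);
          raw_spin_unlock(&example_lock);
  }

  /* Reader: lockless, retry if a writer was active in between */
  static u64 example_read(void)
  {
          unsigned int seq;
          u64 val;

          do {
                  seq = read_seqcount_begin(&example_seq);
                  val = example_value;
          } while (read_seqcount_retry(&example_seq, seq));

          return val;
  }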
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time/jiffies.c | 4 +-
kernel/time/tick-common.c | 10 ++--
kernel/time/tick-internal.h | 3 +-
kernel/time/tick-sched.c | 16 +++---
kernel/time/timekeeping.c | 114 +++++++++++++++++++++++++------------------
5 files changed, 87 insertions(+), 60 deletions(-)
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154..21940eb 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ec..39de540 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b9..c91100d 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,5 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_spinlock_t xtime_lock;
+extern seqcount_t xtime_seq;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2a8766c..d81185f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
/*
@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
return period;
}
@@ -317,11 +321,11 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 12843e9..f4e5eaf 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -76,8 +76,9 @@ struct timekeeper {
/* Offset clock monotonic -> clock boottime */
ktime_t offs_boot;
- /* Seqlock for all timekeeper values */
- seqlock_t lock;
+ /* Open coded seqlock for all timekeeper values */
+ seqcount_t seq;
+ raw_spinlock_t lock;
};
static struct timekeeper timekeeper;
@@ -86,7 +87,8 @@ static struct timekeeper timekeeper;
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
+seqcount_t xtime_seq;
/* flag for if timekeeping is suspended */
@@ -243,7 +245,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
nsecs = timekeeping_get_ns();
@@ -251,7 +253,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -266,7 +268,7 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
secs = timekeeper.xtime.tv_sec +
timekeeper.wall_to_monotonic.tv_sec;
nsecs = timekeeper.xtime.tv_nsec +
@@ -275,7 +277,7 @@ ktime_t ktime_get(void)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -301,14 +303,14 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -336,7 +338,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
do {
u32 arch_offset;
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts_raw = timekeeper.raw_time;
*ts_real = timekeeper.xtime;
@@ -349,7 +351,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
nsecs_raw += arch_offset;
nsecs_real += arch_offset;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -388,7 +390,8 @@ int do_settimeofday(const struct timespec *tv)
if (!timespec_valid_strict(tv))
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -400,7 +403,8 @@ int do_settimeofday(const struct timespec *tv)
timekeeper.xtime = *tv;
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -426,7 +430,8 @@ int timekeeping_inject_offset(struct timespec *ts)
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -443,7 +448,8 @@ int timekeeping_inject_offset(struct timespec *ts)
error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -464,7 +470,8 @@ static int change_clocksource(void *data)
new = (struct clocksource *) data;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
if (!new->enable || new->enable(new) == 0) {
@@ -475,7 +482,8 @@ static int change_clocksource(void *data)
}
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
return 0;
}
@@ -522,11 +530,11 @@ void getrawmonotonic(struct timespec *ts)
s64 nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
nsecs = timekeeping_get_ns_raw();
*ts = timekeeper.raw_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -542,11 +550,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return ret;
}
@@ -559,11 +567,11 @@ u64 timekeeping_max_deferment(void)
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
ret = timekeeper.clock->max_idle_ns;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return ret;
}
@@ -623,11 +631,13 @@ void __init timekeeping_init(void)
boot.tv_nsec = 0;
}
- seqlock_init(&timekeeper.lock);
+ raw_spin_lock_init(&timekeeper.lock);
+ seqcount_init(&timekeeper.seq);
ntp_init();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
@@ -646,7 +656,8 @@ void __init timekeeping_init(void)
update_rt_offset();
timekeeper.total_sleep_time.tv_sec = 0;
timekeeper.total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}
/* time in seconds when suspend began */
@@ -700,7 +711,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -708,7 +720,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -731,7 +744,8 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -742,7 +756,8 @@ static void timekeeping_resume(void)
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(false);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();
@@ -760,7 +775,8 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
timekeeping_suspended = 1;
@@ -783,7 +799,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -1044,7 +1061,8 @@ static void update_wall_time(void)
int shift = 0, maxshift;
unsigned long flags;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -1137,8 +1155,8 @@ static void update_wall_time(void)
timekeeping_update(false);
out:
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
-
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}
/**
@@ -1184,13 +1202,13 @@ void get_monotonic_boottime(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
sleep = timekeeper.total_sleep_time;
nsecs = timekeeping_get_ns();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1241,10 +1259,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
now = timekeeper.xtime;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return now;
}
@@ -1256,11 +1274,11 @@ struct timespec get_monotonic_coarse(void)
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
now = timekeeper.xtime;
mono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1292,11 +1310,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*xtim = timekeeper.xtime;
*wtom = timekeeper.wall_to_monotonic;
*sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1342,9 +1360,9 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
wtom = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return timespec_to_ktime(wtom);
}
@@ -1359,7 +1377,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
--
1.7.10.4