Subject: timekeeping: Split xtime_lock
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Mar 2012 15:14:06 +0100
xtime_lock is going to be split apart in mainline, so we can shorten
the seqcount-protected regions and avoid updating the seqcount in some
code paths. This is a straightforward split, so we can avoid the
whole mess with raw seqlocks for RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
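For reference, this is the locking pattern the split introduces (an
illustrative sketch assembled from the hunks below, not additional code
in the patch): the writer serializes on the raw spinlock and publishes
the update through the seqcount, while readers keep the usual lockless
retry loop against the seqcount alone.

	__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
	seqcount_t xtime_seq;

	/* writer side, e.g. the periodic tick handler */
	raw_spin_lock(&xtime_lock);
	write_seqcount_begin(&xtime_seq);
	do_timer(1);
	write_seqcount_end(&xtime_seq);
	raw_spin_unlock(&xtime_lock);

	/* reader side, e.g. get_jiffies_64() */
	do {
		seq = read_seqcount_begin(&xtime_seq);
		ret = jiffies_64;
	} while (read_seqcount_retry(&xtime_seq, seq));
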
kernel/time/jiffies.c | 4 -
kernel/time/tick-common.c | 10 ++-
kernel/time/tick-internal.h | 3 -
kernel/time/tick-sched.c | 16 +++---
kernel/time/timekeeping.c | 116 +++++++++++++++++++++++++-------------------
5 files changed, 88 insertions(+), 61 deletions(-)
Index: linux-stable/kernel/time/jiffies.c
===================================================================
--- linux-stable.orig/kernel/time/jiffies.c
+++ linux-stable/kernel/time/jiffies.c
@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
Index: linux-stable/kernel/time/tick-common.c
===================================================================
--- linux-stable.orig/kernel/time/tick-common.c
+++ linux-stable/kernel/time/tick-common.c
@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
Index: linux-stable/kernel/time/tick-internal.h
===================================================================
--- linux-stable.orig/kernel/time/tick-internal.h
+++ linux-stable/kernel/time/tick-internal.h
@@ -141,4 +141,5 @@ static inline int tick_device_is_functio
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_spinlock_t xtime_lock;
+extern seqcount_t xtime_seq;
Index: linux-stable/kernel/time/tick-sched.c
===================================================================
--- linux-stable.orig/kernel/time/tick-sched.c
+++ linux-stable/kernel/time/tick-sched.c
@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(kti
return;
/* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(kti
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
/*
@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(vo
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
return period;
}
@@ -282,11 +286,11 @@ static ktime_t tick_nohz_stop_sched_tick
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
Index: linux-stable/kernel/time/timekeeping.c
===================================================================
--- linux-stable.orig/kernel/time/timekeeping.c
+++ linux-stable/kernel/time/timekeeping.c
@@ -74,7 +74,8 @@ struct timekeeper {
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;
/* Seqlock for all timekeeper values */
- seqlock_t lock;
+ seqcount_t seq;
+ raw_spinlock_t lock;
};
static struct timekeeper timekeeper;
@@ -83,7 +84,8 @@ static struct timekeeper timekeeper;
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
+seqcount_t xtime_seq;
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
@@ -300,12 +302,12 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
ts->tv_sec = tk->xtime_sec;
nsecs = timekeeping_get_ns(tk);
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
ts->tv_nsec = 0;
timespec_add_ns(ts, nsecs);
@@ -321,11 +323,11 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -352,12 +354,12 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
ts->tv_sec = tk->xtime_sec;
nsec = timekeeping_get_ns(tk);
tomono = tk->wall_to_monotonic;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
ts->tv_sec += tomono.tv_sec;
ts->tv_nsec = 0;
@@ -385,7 +387,7 @@ void getnstime_raw_and_real(struct times
WARN_ON_ONCE(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
*ts_raw = tk->raw_time;
ts_real->tv_sec = tk->xtime_sec;
@@ -394,7 +396,7 @@ void getnstime_raw_and_real(struct times
nsecs_raw = timekeeping_get_ns_raw(tk);
nsecs_real = timekeeping_get_ns(tk);
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -434,7 +436,8 @@ int do_settimeofday(const struct timespe
if (!timespec_valid_strict(tv))
return -EINVAL;
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
timekeeping_forward_now(tk);
@@ -448,7 +451,8 @@ int do_settimeofday(const struct timespe
timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -473,7 +477,8 @@ int timekeeping_inject_offset(struct tim
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
timekeeping_forward_now(tk);
@@ -490,7 +495,8 @@ int timekeeping_inject_offset(struct tim
error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -512,7 +518,8 @@ static int change_clocksource(void *data
new = (struct clocksource *) data;
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
timekeeping_forward_now(tk);
if (!new->enable || new->enable(new) == 0) {
@@ -523,7 +530,8 @@ static int change_clocksource(void *data
}
timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
return 0;
}
@@ -573,11 +581,11 @@ void getrawmonotonic(struct timespec *ts
s64 nsecs;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
nsecs = timekeeping_get_ns_raw(tk);
*ts = tk->raw_time;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -593,11 +601,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
return ret;
}
@@ -612,11 +620,11 @@ u64 timekeeping_max_deferment(void)
u64 ret;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
ret = tk->clock->max_idle_ns;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
return ret;
}
@@ -677,11 +685,13 @@ void __init timekeeping_init(void)
boot.tv_nsec = 0;
}
- seqlock_init(&tk->lock);
+ raw_spin_lock_init(&tk->lock);
+ seqcount_init(&tk->seq);
ntp_init();
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
@@ -700,7 +710,8 @@ void __init timekeeping_init(void)
tmp.tv_nsec = 0;
tk_set_sleep_time(tk, tmp);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
}
/* time in seconds when suspend began */
@@ -747,7 +758,8 @@ void timekeeping_inject_sleeptime(struct
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
timekeeping_forward_now(tk);
@@ -755,7 +767,8 @@ void timekeeping_inject_sleeptime(struct
timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -778,7 +791,8 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -789,7 +803,8 @@ static void timekeeping_resume(void)
tk->ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(tk, false);
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
touch_softlockup_watchdog();
@@ -808,7 +823,8 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
timekeeping_forward_now(tk);
timekeeping_suspended = 1;
@@ -831,7 +847,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&tk->lock, flags);
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -1141,7 +1158,8 @@ static void update_wall_time(void)
unsigned long flags;
s64 remainder;
- write_seqlock_irqsave(&tk->lock, flags);
+ raw_spin_lock_irqsave(&tk->lock, flags);
+ write_seqcount_begin(&tk->seq);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -1205,8 +1223,8 @@ static void update_wall_time(void)
timekeeping_update(tk, false);
out:
- write_sequnlock_irqrestore(&tk->lock, flags);
-
+ write_seqcount_end(&tk->seq);
+ raw_spin_unlock_irqrestore(&tk->lock, flags);
}
/**
@@ -1253,13 +1271,13 @@ void get_monotonic_boottime(struct times
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
ts->tv_sec = tk->xtime_sec;
nsec = timekeeping_get_ns(tk);
tomono = tk->wall_to_monotonic;
sleep = tk->total_sleep_time;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
ts->tv_nsec = 0;
@@ -1318,10 +1336,10 @@ struct timespec current_kernel_time(void
unsigned long seq;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
now = tk_xtime(tk);
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
return now;
}
@@ -1334,11 +1352,11 @@ struct timespec get_monotonic_coarse(voi
unsigned long seq;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
now = tk_xtime(tk);
mono = tk->wall_to_monotonic;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1371,11 +1389,11 @@ void get_xtime_and_monotonic_and_sleep_o
unsigned long seq;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
*xtim = tk_xtime(tk);
*wtom = tk->wall_to_monotonic;
*sleep = tk->total_sleep_time;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1395,14 +1413,14 @@ ktime_t ktime_get_update_offsets(ktime_t
u64 secs, nsecs;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
secs = tk->xtime_sec;
nsecs = timekeeping_get_ns(tk);
*offs_real = tk->offs_real;
*offs_boot = tk->offs_boot;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
now = ktime_add_ns(ktime_set(secs, 0), nsecs);
now = ktime_sub(now, *offs_real);
@@ -1420,9 +1438,9 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;
do {
- seq = read_seqbegin(&tk->lock);
+ seq = read_seqcount_begin(&tk->seq);
wtom = tk->wall_to_monotonic;
- } while (read_seqretry(&tk->lock, seq));
+ } while (read_seqcount_retry(&tk->seq, seq));
return timespec_to_ktime(wtom);
}
@@ -1436,7 +1454,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_of
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}