Subject: vtime: Split lock and seqcount
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 23 Jul 2013 15:45:51 +0200

Replace the vtime_seqlock seqlock with a simple seqcount and a raw
spinlock so that it can be taken in atomic context on RT.
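
The write side thus moves from the combined seqlock to the split
lock/seqcount pattern, roughly as follows (illustrative sketch only,
mirroring the hunks below; on RT the spinlock inside seqlock_t is a
sleeping lock, while raw_spinlock_t is not):

	/* Before: seqlock_t bundles the writer lock and the sequence count. */
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);

	/*
	 * After: the raw spinlock serializes writers and stays usable in
	 * atomic context on RT, the seqcount still lets readers detect
	 * concurrent updates and retry locklessly.
	 */
	raw_spin_lock(&tsk->vtime_lock);
	write_seqcount_begin(&tsk->vtime_seq);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seq);
	raw_spin_unlock(&tsk->vtime_lock);

Readers correspondingly switch from read_seqbegin()/read_seqretry() on
the seqlock to read_seqcount_begin()/read_seqcount_retry() on the
seqcount and remain lockless.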

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/init_task.h |    3 +-
 include/linux/sched.h     |    3 +-
 kernel/fork.c             |    3 +-
 kernel/sched/cputime.c    |   62 +++++++++++++++++++++++++++++-----------------
 4 files changed, 46 insertions(+), 25 deletions(-)
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -150,7 +150,8 @@ extern struct task_group root_task_group

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk) \
-	.vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
+	.vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
+	.vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \
 	.vtime_snap = 0, \
 	.vtime_snap_whence = VTIME_SYS,
 #else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1519,7 +1519,8 @@ struct task_struct {
 	cputime_t gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_t vtime_seqlock;
+	raw_spinlock_t vtime_lock;
+	seqcount_t vtime_seq;
 	unsigned long long vtime_snap;
 	enum {
 		VTIME_SLEEPING = 0,
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1349,7 +1349,8 @@ static struct task_struct *copy_process(
 	prev_cputime_init(&p->prev_cputime);

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_init(&p->vtime_seqlock);
+	raw_spin_lock_init(&p->vtime_lock);
+	seqcount_init(&p->vtime_seq);
 	p->vtime_snap = 0;
 	p->vtime_snap_whence = VTIME_SLEEPING;
 #endif
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -696,37 +696,45 @@ static void __vtime_account_system(struc

 void vtime_account_system(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	if (context_tracking_in_user())
 		tsk->vtime_snap_whence = VTIME_USER;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t delta_cpu;

-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
 	delta_cpu = get_vtime_delta(tsk);
 	tsk->vtime_snap_whence = VTIME_SYS;
 	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_user_enter(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	tsk->vtime_snap_whence = VTIME_USER;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }

 void vtime_guest_enter(struct task_struct *tsk)
@@ -738,19 +746,23 @@ void vtime_guest_enter(struct task_struc
 	 * synchronization against the reader (task_gtime())
 	 * that can thus safely catch up with a tickless delta.
 	 */
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	current->flags |= PF_VCPU;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);

 void vtime_guest_exit(struct task_struct *tsk)
 {
-	write_seqlock(&tsk->vtime_seqlock);
+	raw_spin_lock(&tsk->vtime_lock);
+	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	current->flags &= ~PF_VCPU;
-	write_sequnlock(&tsk->vtime_seqlock);
+	write_seqcount_end(&tsk->vtime_seq);
+	raw_spin_unlock(&tsk->vtime_lock);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);

@@ -763,24 +775,30 @@ void vtime_account_idle(struct task_stru

 void arch_vtime_task_switch(struct task_struct *prev)
 {
-	write_seqlock(&prev->vtime_seqlock);
+	raw_spin_lock(&prev->vtime_lock);
+	write_seqcount_begin(&prev->vtime_seq);
 	prev->vtime_snap_whence = VTIME_SLEEPING;
-	write_sequnlock(&prev->vtime_seqlock);
+	write_seqcount_end(&prev->vtime_seq);
+	raw_spin_unlock(&prev->vtime_lock);

-	write_seqlock(&current->vtime_seqlock);
+	raw_spin_lock(&current->vtime_lock);
+	write_seqcount_begin(&current->vtime_seq);
 	current->vtime_snap_whence = VTIME_SYS;
 	current->vtime_snap = sched_clock_cpu(smp_processor_id());
-	write_sequnlock(&current->vtime_seqlock);
+	write_seqcount_end(&current->vtime_seq);
+	raw_spin_unlock(&current->vtime_lock);
 }

 void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;

-	write_seqlock_irqsave(&t->vtime_seqlock, flags);
+	raw_spin_lock_irqsave(&t->vtime_lock, flags);
+	write_seqcount_begin(&t->vtime_seq);
 	t->vtime_snap_whence = VTIME_SYS;
 	t->vtime_snap = sched_clock_cpu(cpu);
-	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+	write_seqcount_end(&t->vtime_seq);
+	raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
 }

 cputime_t task_gtime(struct task_struct *t)
@@ -792,13 +810,13 @@ cputime_t task_gtime(struct task_struct
 		return t->gtime;

 	do {
-		seq = read_seqbegin(&t->vtime_seqlock);
+		seq = read_seqcount_begin(&t->vtime_seq);

 		gtime = t->gtime;
 		if (t->flags & PF_VCPU)
 			gtime += vtime_delta(t);

-	} while (read_seqretry(&t->vtime_seqlock, seq));
+	} while (read_seqcount_retry(&t->vtime_seq, seq));

 	return gtime;
 }
@@ -821,7 +839,7 @@ fetch_task_cputime(struct task_struct *t
 		*udelta = 0;
 		*sdelta = 0;

-		seq = read_seqbegin(&t->vtime_seqlock);
+		seq = read_seqcount_begin(&t->vtime_seq);

 		if (u_dst)
 			*u_dst = *u_src;
@@ -845,7 +863,7 @@ fetch_task_cputime(struct task_struct *t
 			if (t->vtime_snap_whence == VTIME_SYS)
 				*sdelta = delta;
 		}
-	} while (read_seqretry(&t->vtime_seqlock, seq));
+	} while (read_seqcount_retry(&t->vtime_seq, seq));
 }