| From 9fd9af20640d4da2a4bd7311ffee1b5c7bd7b88f Mon Sep 17 00:00:00 2001 |
| From: John Stultz <johnstul@us.ibm.com> |
| Date: Fri, 3 Jul 2009 08:29:58 -0500 |
| Subject: [PATCH] posix-timers: thread posix-cpu-timers on -rt |
| |
| commit 14cbf680c340564ac33e3518bf6c110d08d93cc9 in tip. |
| |
| posix-cpu-timer code takes non -rt safe locks in hard irq |
| context. Move it to a thread. |
| |
| Signed-off-by: John Stultz <johnstul@us.ibm.com> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| |
| diff --git a/include/linux/init_task.h b/include/linux/init_task.h |
| index e93b8cd..cae32ed 100644 |
| --- a/include/linux/init_task.h |
| +++ b/include/linux/init_task.h |
| @@ -167,6 +167,7 @@ extern struct cred init_cred; |
| .fs_excl = ATOMIC_INIT(0), \ |
| .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ |
| .timer_slack_ns = 50000, /* 50 usec default slack */ \ |
| + .posix_timer_list = NULL, \ |
| .pids = { \ |
| [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ |
| [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ |
| diff --git a/include/linux/sched.h b/include/linux/sched.h |
| index 7e50287..7b23ee4 100644 |
| --- a/include/linux/sched.h |
| +++ b/include/linux/sched.h |
| @@ -1370,6 +1370,8 @@ struct task_struct { |
| struct task_cputime cputime_expires; |
| struct list_head cpu_timers[3]; |
| |
| + struct task_struct* posix_timer_list; |
| + |
| /* process credentials */ |
| const struct cred *real_cred; /* objective and real subjective task |
| * credentials (COW) */ |
| diff --git a/init/main.c b/init/main.c |
| index edabbff..650c1ce 100644 |
| --- a/init/main.c |
| +++ b/init/main.c |
| @@ -36,6 +36,7 @@ |
| #include <linux/workqueue.h> |
| #include <linux/profile.h> |
| #include <linux/rcupdate.h> |
| +#include <linux/posix-timers.h> |
| #include <linux/moduleparam.h> |
| #include <linux/kallsyms.h> |
| #include <linux/writeback.h> |
| diff --git a/kernel/fork.c b/kernel/fork.c |
| index bab0a66..334ebe7 100644 |
| --- a/kernel/fork.c |
| +++ b/kernel/fork.c |
| @@ -1090,7 +1090,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, |
| acct_clear_integrals(p); |
| |
| posix_cpu_timers_init(p); |
| - |
| + p->posix_timer_list = NULL; |
| p->lock_depth = -1; /* -1 = no lock */ |
| do_posix_clock_monotonic_gettime(&p->start_time); |
| p->real_start_time = p->start_time; |
| diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c |
| index 359cc24..36994bc 100644 |
| --- a/kernel/posix-cpu-timers.c |
| +++ b/kernel/posix-cpu-timers.c |
| @@ -570,7 +570,7 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) |
| p->cpu_timers : p->signal->cpu_timers); |
| head += CPUCLOCK_WHICH(timer->it_clock); |
| |
| - BUG_ON(!irqs_disabled()); |
| + BUG_ON_NONRT(!irqs_disabled()); |
| spin_lock(&p->sighand->siglock); |
| |
| listpos = head; |
| @@ -749,7 +749,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags, |
| /* |
| * Disarm any old timer after extracting its expiry time. |
| */ |
| - BUG_ON(!irqs_disabled()); |
| + BUG_ON_NONRT(!irqs_disabled()); |
| |
| ret = 0; |
| spin_lock(&p->sighand->siglock); |
| @@ -1390,12 +1390,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk) |
| * already updated our counts. We need to check if any timers fire now. |
| * Interrupts are disabled. |
| */ |
| -void run_posix_cpu_timers(struct task_struct *tsk) |
| +void __run_posix_cpu_timers(struct task_struct *tsk) |
| { |
| LIST_HEAD(firing); |
| struct k_itimer *timer, *next; |
| |
| - BUG_ON(!irqs_disabled()); |
| |
| /* |
| * The fast path checks that there are no expired thread or thread |
| @@ -1447,6 +1446,162 @@ void run_posix_cpu_timers(struct task_struct *tsk) |
| } |
| } |
| |
| +#include <linux/kthread.h> |
| +#include <linux/cpu.h> |
| +DEFINE_PER_CPU(struct task_struct *, posix_timer_task); |
| +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); |
| + |
| +static int posix_cpu_timers_thread(void *data) |
| +{ |
| + int cpu = (long)data; |
| + |
| + BUG_ON(per_cpu(posix_timer_task,cpu) != current); |
| + |
| + while (!kthread_should_stop()) { |
| + struct task_struct *tsk = NULL; |
| + struct task_struct *next = NULL; |
| + |
| + if (cpu_is_offline(cpu)) |
| + goto wait_to_die; |
| + |
| + /* grab task list */ |
| + raw_local_irq_disable(); |
| + tsk = per_cpu(posix_timer_tasklist, cpu); |
| + per_cpu(posix_timer_tasklist, cpu) = NULL; |
| + raw_local_irq_enable(); |
| + |
| + /* it's possible the list is empty; just sleep until woken */ |
| + if (!tsk) { |
| + set_current_state(TASK_INTERRUPTIBLE); |
| + schedule(); |
| + __set_current_state(TASK_RUNNING); |
| + continue; |
| + } |
| + |
| + /* Process task list */ |
| + while (1) { |
| + /* save next */ |
| + next = tsk->posix_timer_list; |
| + |
| + /* run the task timers, clear its ptr and |
| + * unreference it |
| + */ |
| + __run_posix_cpu_timers(tsk); |
| + tsk->posix_timer_list = NULL; |
| + put_task_struct(tsk); |
| + |
| + /* check if this is the last on the list */ |
| + if (next == tsk) |
| + break; |
| + tsk = next; |
| + } |
| + } |
| + return 0; |
| + |
| +wait_to_die: |
| + /* Wait for kthread_stop */ |
| + set_current_state(TASK_INTERRUPTIBLE); |
| + while (!kthread_should_stop()) { |
| + schedule(); |
| + set_current_state(TASK_INTERRUPTIBLE); |
| + } |
| + __set_current_state(TASK_RUNNING); |
| + return 0; |
| +} |
| + |
| +void run_posix_cpu_timers(struct task_struct *tsk) |
| +{ |
| + unsigned long cpu = smp_processor_id(); |
| + struct task_struct *tasklist; |
| + |
| + BUG_ON(!irqs_disabled()); |
| + if(!per_cpu(posix_timer_task, cpu)) |
| + return; |
| + /* get per-cpu references */ |
| + tasklist = per_cpu(posix_timer_tasklist, cpu); |
| + |
| + /* check to see if we're already queued */ |
| + if (!tsk->posix_timer_list) { |
| + get_task_struct(tsk); |
| + if (tasklist) { |
| + tsk->posix_timer_list = tasklist; |
| + } else { |
| + /* |
| + * The list is terminated by a self-pointing |
| + * task_struct |
| + */ |
| + tsk->posix_timer_list = tsk; |
| + } |
| + per_cpu(posix_timer_tasklist, cpu) = tsk; |
| + } |
| + /* wake the per-cpu thread to process the queued tasks */ |
| + wake_up_process(per_cpu(posix_timer_task, cpu)); |
| +} |
| + |
| +/* |
| + * posix_cpu_thread_call - callback that gets triggered when a CPU is added. |
| + * Here we start up the per-cpu posix timer thread for the new CPU. |
| + */ |
| +static int posix_cpu_thread_call(struct notifier_block *nfb, |
| + unsigned long action, void *hcpu) |
| +{ |
| + int cpu = (long)hcpu; |
| + struct task_struct *p; |
| + struct sched_param param; |
| + |
| + switch (action) { |
| + case CPU_UP_PREPARE: |
| + p = kthread_create(posix_cpu_timers_thread, hcpu, |
| + "posix_cpu_timers/%d",cpu); |
| + if (IS_ERR(p)) |
| + return NOTIFY_BAD; |
| + p->flags |= PF_NOFREEZE; |
| + kthread_bind(p, cpu); |
| + /* Must be high prio to avoid getting starved */ |
| + param.sched_priority = MAX_RT_PRIO-1; |
| + sched_setscheduler(p, SCHED_FIFO, ¶m); |
| + per_cpu(posix_timer_task,cpu) = p; |
| + break; |
| + case CPU_ONLINE: |
| + /* Strictly unnecessary, as first user will wake it. */ |
| + wake_up_process(per_cpu(posix_timer_task,cpu)); |
| + break; |
| +#ifdef CONFIG_HOTPLUG_CPU |
| + case CPU_UP_CANCELED: |
| + /* Unbind it from offline cpu so it can run. Fall thru. */ |
| + kthread_bind(per_cpu(posix_timer_task,cpu), |
| + any_online_cpu(cpu_online_map)); |
| + kthread_stop(per_cpu(posix_timer_task,cpu)); |
| + per_cpu(posix_timer_task,cpu) = NULL; |
| + break; |
| + case CPU_DEAD: |
| + kthread_stop(per_cpu(posix_timer_task,cpu)); |
| + per_cpu(posix_timer_task,cpu) = NULL; |
| + break; |
| +#endif |
| + } |
| + return NOTIFY_OK; |
| +} |
| + |
| +/* Register early in the notifier chain (priority 10) so the per-cpu |
| + * timer thread is set up before most other hotplug callbacks run. |
| + */ |
| +static struct notifier_block __devinitdata posix_cpu_thread_notifier = { |
| + .notifier_call = posix_cpu_thread_call, |
| + .priority = 10 |
| +}; |
| + |
| +static int __init posix_cpu_thread_init(void) |
| +{ |
| + void *cpu = (void *)(long)smp_processor_id(); |
| + /* Start one for boot CPU. */ |
| + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, cpu); |
| + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, cpu); |
| + register_cpu_notifier(&posix_cpu_thread_notifier); |
| + return 0; |
| +} |
| +early_initcall(posix_cpu_thread_init); |
| + |
| /* |
| * Set one of the process-wide special case CPU timers. |
| * The tsk->sighand->siglock must be held by the caller. |
| @@ -1713,6 +1868,12 @@ static __init int init_posix_cpu_timers(void) |
| .nsleep_restart = thread_cpu_nsleep_restart, |
| }; |
| struct timespec ts; |
| + unsigned long cpu; |
| + |
| + /* init the per-cpu posix_timer_tasklets */ |
| + for_each_cpu_mask(cpu, cpu_possible_map) { |
| + per_cpu(posix_timer_tasklist, cpu) = NULL; |
| + } |
| |
| register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); |
| register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); |
| -- |
| 1.7.1.1 |
| |