| From fb8e17a618e797862727494084cd3eefe65d1c75 Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Fri, 3 Jul 2009 08:44:56 -0500 |
| Subject: [PATCH] signals: Allow rt tasks to cache one sigqueue struct |
| |
| To avoid allocation, allow RT tasks to cache one sigqueue struct in |
| the task struct. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| diff --git a/include/linux/sched.h b/include/linux/sched.h |
| index f0e26b65534e..478da6a95a6f 100644 |
| --- a/include/linux/sched.h |
| +++ b/include/linux/sched.h |
| @@ -753,6 +753,8 @@ struct task_struct { |
| /* Signal handlers: */ |
| struct signal_struct *signal; |
| struct sighand_struct *sighand; |
| + struct sigqueue *sigqueue_cache; |
| + |
| sigset_t blocked; |
| sigset_t real_blocked; |
| /* Restored if set_restore_sigmask() was used: */ |
| diff --git a/include/linux/signal.h b/include/linux/signal.h |
| index 1f5a16620693..e4dc73c31289 100644 |
| --- a/include/linux/signal.h |
| +++ b/include/linux/signal.h |
| @@ -231,6 +231,7 @@ static inline void init_sigpending(struct sigpending *sig) |
| } |
| |
| extern void flush_sigqueue(struct sigpending *queue); |
| +extern void flush_task_sigqueue(struct task_struct *tsk); |
| |
| /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ |
| static inline int valid_signal(unsigned long sig) |
| diff --git a/kernel/exit.c b/kernel/exit.c |
| index 516acdb0e0ec..e8c8f882b930 100644 |
| --- a/kernel/exit.c |
| +++ b/kernel/exit.c |
| @@ -159,7 +159,7 @@ static void __exit_signal(struct task_struct *tsk) |
| * Do this under ->siglock, we can race with another thread |
| * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. |
| */ |
| - flush_sigqueue(&tsk->pending); |
| + flush_task_sigqueue(tsk); |
| tsk->sighand = NULL; |
| spin_unlock(&sighand->siglock); |
| |
| diff --git a/kernel/fork.c b/kernel/fork.c |
| index 2791d7d23caa..f31a0b0341e0 100644 |
| --- a/kernel/fork.c |
| +++ b/kernel/fork.c |
| @@ -1631,6 +1631,7 @@ static __latent_entropy struct task_struct *copy_process( |
| spin_lock_init(&p->alloc_lock); |
| |
| init_sigpending(&p->pending); |
| + p->sigqueue_cache = NULL; |
| |
| p->utime = p->stime = p->gtime = 0; |
| #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME |
| diff --git a/kernel/signal.c b/kernel/signal.c |
| index 60da32dd6420..042a263e94bc 100644 |
| --- a/kernel/signal.c |
| +++ b/kernel/signal.c |
| @@ -19,6 +19,7 @@ |
| #include <linux/sched/task.h> |
| #include <linux/sched/task_stack.h> |
| #include <linux/sched/cputime.h> |
| +#include <linux/sched/rt.h> |
| #include <linux/fs.h> |
| #include <linux/tty.h> |
| #include <linux/binfmts.h> |
| @@ -357,13 +358,30 @@ static bool task_participate_group_stop(struct task_struct *task) |
| return false; |
| } |
| |
| +static inline struct sigqueue *get_task_cache(struct task_struct *t) |
| +{ |
| + struct sigqueue *q = t->sigqueue_cache; |
| + |
| + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) |
| + return NULL; |
| + return q; |
| +} |
| + |
| +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) |
| +{ |
| + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) |
| + return 0; |
| + return 1; |
| +} |
| + |
| /* |
| * allocate a new signal queue record |
| * - this may be called without locks if and only if t == current, otherwise an |
| * appropriate lock must be held to stop the target task from exiting |
| */ |
| static struct sigqueue * |
| -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
| +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, |
| + int override_rlimit, int fromslab) |
| { |
| struct sigqueue *q = NULL; |
| struct user_struct *user; |
| @@ -380,7 +398,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi |
| if (override_rlimit || |
| atomic_read(&user->sigpending) <= |
| task_rlimit(t, RLIMIT_SIGPENDING)) { |
| - q = kmem_cache_alloc(sigqueue_cachep, flags); |
| + if (!fromslab) |
| + q = get_task_cache(t); |
| + if (!q) |
| + q = kmem_cache_alloc(sigqueue_cachep, flags); |
| } else { |
| print_dropped_signal(sig); |
| } |
| @@ -397,6 +418,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi |
| return q; |
| } |
| |
| +static struct sigqueue * |
| +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, |
| + int override_rlimit) |
| +{ |
| + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); |
| +} |
| + |
| static void __sigqueue_free(struct sigqueue *q) |
| { |
| if (q->flags & SIGQUEUE_PREALLOC) |
| @@ -406,6 +434,21 @@ static void __sigqueue_free(struct sigqueue *q) |
| kmem_cache_free(sigqueue_cachep, q); |
| } |
| |
| +static void sigqueue_free_current(struct sigqueue *q) |
| +{ |
| + struct user_struct *up; |
| + |
| + if (q->flags & SIGQUEUE_PREALLOC) |
| + return; |
| + |
| + up = q->user; |
| + if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { |
| + atomic_dec(&up->sigpending); |
| + free_uid(up); |
| + } else |
| + __sigqueue_free(q); |
| +} |
| + |
| void flush_sigqueue(struct sigpending *queue) |
| { |
| struct sigqueue *q; |
| @@ -419,6 +462,21 @@ void flush_sigqueue(struct sigpending *queue) |
| } |
| |
| /* |
| + * Called from __exit_signal. Flush tsk->pending and |
| + * tsk->sigqueue_cache |
| + */ |
| +void flush_task_sigqueue(struct task_struct *tsk) |
| +{ |
| + struct sigqueue *q; |
| + |
| + flush_sigqueue(&tsk->pending); |
| + |
| + q = get_task_cache(tsk); |
| + if (q) |
| + kmem_cache_free(sigqueue_cachep, q); |
| +} |
| + |
| +/* |
| * Flush all pending signals for this kthread. |
| */ |
| void flush_signals(struct task_struct *t) |
| @@ -539,7 +597,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, |
| (info->si_code == SI_TIMER) && |
| (info->si_sys_private); |
| |
| - __sigqueue_free(first); |
| + sigqueue_free_current(first); |
| } else { |
| /* |
| * Ok, it wasn't in the queue. This must be |
| @@ -575,6 +633,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
| bool resched_timer = false; |
| int signr; |
| |
| + WARN_ON_ONCE(tsk != current); |
| + |
| /* We only dequeue private signals from ourselves, we don't let |
| * signalfd steal them |
| */ |
| @@ -1504,7 +1564,8 @@ EXPORT_SYMBOL(kill_pid); |
| */ |
| struct sigqueue *sigqueue_alloc(void) |
| { |
| - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
| +	/* Preallocated sigqueue objects always come from the slab cache! */ |
| + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); |
| |
| if (q) |
| q->flags |= SIGQUEUE_PREALLOC; |
| -- |
| 2.1.4 |
| |