| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Fri, 3 Jul 2009 08:44:56 -0500 |
| Subject: signals: Allow rt tasks to cache one sigqueue struct |
| |
| To avoid allocation, allow rt tasks to cache one sigqueue struct in |
| the task struct. |
| |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| |
| --- |
| include/linux/sched.h | 1 |
| include/linux/signal.h | 1 |
| kernel/exit.c | 2 - |
| kernel/fork.c | 1 |
| kernel/signal.c | 94 ++++++++++++++++++++++++++++++++++++++++++++----- |
| 5 files changed, 89 insertions(+), 10 deletions(-) |
| |
| --- a/include/linux/sched.h |
| +++ b/include/linux/sched.h |
| @@ -1215,6 +1215,7 @@ struct task_struct { |
| /* signal handlers */ |
| struct signal_struct *signal; |
| struct sighand_struct *sighand; |
| + struct sigqueue *sigqueue_cache; |
| |
| sigset_t blocked, real_blocked; |
| sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ |
| --- a/include/linux/signal.h |
| +++ b/include/linux/signal.h |
| @@ -226,6 +226,7 @@ static inline void init_sigpending(struc |
| } |
| |
| extern void flush_sigqueue(struct sigpending *queue); |
| +extern void flush_task_sigqueue(struct task_struct *tsk); |
| |
| /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ |
| static inline int valid_signal(unsigned long sig) |
| --- a/kernel/exit.c |
| +++ b/kernel/exit.c |
| @@ -145,7 +145,7 @@ static void __exit_signal(struct task_st |
| * Do this under ->siglock, we can race with another thread |
| * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. |
| */ |
| - flush_sigqueue(&tsk->pending); |
| + flush_task_sigqueue(tsk); |
| tsk->sighand = NULL; |
| spin_unlock(&sighand->siglock); |
| |
| --- a/kernel/fork.c |
| +++ b/kernel/fork.c |
| @@ -1231,6 +1231,7 @@ static struct task_struct *copy_process( |
| spin_lock_init(&p->alloc_lock); |
| |
| init_sigpending(&p->pending); |
| + p->sigqueue_cache = NULL; |
| |
| p->utime = p->stime = p->gtime = 0; |
| p->utimescaled = p->stimescaled = 0; |
| --- a/kernel/signal.c |
| +++ b/kernel/signal.c |
| @@ -14,6 +14,7 @@ |
| #include <linux/export.h> |
| #include <linux/init.h> |
| #include <linux/sched.h> |
| +#include <linux/sched/rt.h> |
| #include <linux/fs.h> |
| #include <linux/tty.h> |
| #include <linux/binfmts.h> |
| @@ -349,13 +350,45 @@ static bool task_participate_group_stop( |
| return false; |
| } |
| |
| +#ifdef __HAVE_ARCH_CMPXCHG |
| +static inline struct sigqueue *get_task_cache(struct task_struct *t) |
| +{ |
| + struct sigqueue *q = t->sigqueue_cache; |
| + |
| + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) |
| + return NULL; |
| + return q; |
| +} |
| + |
| +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) |
| +{ |
| + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) |
| + return 0; |
| + return 1; |
| +} |
| + |
| +#else |
| + |
| +static inline struct sigqueue *get_task_cache(struct task_struct *t) |
| +{ |
| + return NULL; |
| +} |
| + |
| +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) |
| +{ |
| + return 1; |
| +} |
| + |
| +#endif |
| + |
| /* |
| * allocate a new signal queue record |
| * - this may be called without locks if and only if t == current, otherwise an |
| * appropriate lock must be held to stop the target task from exiting |
| */ |
| static struct sigqueue * |
| -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
| +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, |
| + int override_rlimit, int fromslab) |
| { |
| struct sigqueue *q = NULL; |
| struct user_struct *user; |
| @@ -372,7 +405,10 @@ __sigqueue_alloc(int sig, struct task_st |
| if (override_rlimit || |
| atomic_read(&user->sigpending) <= |
| task_rlimit(t, RLIMIT_SIGPENDING)) { |
| - q = kmem_cache_alloc(sigqueue_cachep, flags); |
| + if (!fromslab) |
| + q = get_task_cache(t); |
| + if (!q) |
| + q = kmem_cache_alloc(sigqueue_cachep, flags); |
| } else { |
| print_dropped_signal(sig); |
| } |
| @@ -389,6 +425,13 @@ __sigqueue_alloc(int sig, struct task_st |
| return q; |
| } |
| |
| +static struct sigqueue * |
| +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, |
| + int override_rlimit) |
| +{ |
| + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); |
| +} |
| + |
| static void __sigqueue_free(struct sigqueue *q) |
| { |
| if (q->flags & SIGQUEUE_PREALLOC) |
| @@ -398,6 +441,21 @@ static void __sigqueue_free(struct sigqu |
| kmem_cache_free(sigqueue_cachep, q); |
| } |
| |
| +static void sigqueue_free_current(struct sigqueue *q) |
| +{ |
| + struct user_struct *up; |
| + |
| + if (q->flags & SIGQUEUE_PREALLOC) |
| + return; |
| + |
| + up = q->user; |
| + if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { |
| + atomic_dec(&up->sigpending); |
| + free_uid(up); |
| + } else |
| + __sigqueue_free(q); |
| +} |
| + |
| void flush_sigqueue(struct sigpending *queue) |
| { |
| struct sigqueue *q; |
| @@ -411,6 +469,21 @@ void flush_sigqueue(struct sigpending *q |
| } |
| |
| /* |
| + * Called from __exit_signal. Flush tsk->pending and |
| + * tsk->sigqueue_cache |
| + */ |
| +void flush_task_sigqueue(struct task_struct *tsk) |
| +{ |
| + struct sigqueue *q; |
| + |
| + flush_sigqueue(&tsk->pending); |
| + |
| + q = get_task_cache(tsk); |
| + if (q) |
| + kmem_cache_free(sigqueue_cachep, q); |
| +} |
| + |
| +/* |
| * Flush all pending signals for a task. |
| */ |
| void __flush_signals(struct task_struct *t) |
| @@ -562,7 +635,7 @@ static void collect_signal(int sig, stru |
| still_pending: |
| list_del_init(&first->list); |
| copy_siginfo(info, &first->info); |
| - __sigqueue_free(first); |
| + sigqueue_free_current(first); |
| } else { |
| /* |
| * Ok, it wasn't in the queue. This must be |
| @@ -608,6 +681,8 @@ int dequeue_signal(struct task_struct *t |
| { |
| int signr; |
| |
| + WARN_ON_ONCE(tsk != current); |
| + |
| /* We only dequeue private signals from ourselves, we don't let |
| * signalfd steal them |
| */ |
| @@ -1547,7 +1622,8 @@ EXPORT_SYMBOL(kill_pid); |
| */ |
| struct sigqueue *sigqueue_alloc(void) |
| { |
| - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
| + /* Preallocated sigqueue objects always from the slabcache ! */ |
| + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); |
| |
| if (q) |
| q->flags |= SIGQUEUE_PREALLOC; |
| @@ -2367,7 +2443,7 @@ relock: |
| } |
| |
| /** |
| - * signal_delivered - |
| + * signal_delivered - |
| * @sig: number of signal being delivered |
| * @info: siginfo_t of signal being delivered |
| * @ka: sigaction setting that chose the handler |
| @@ -3126,7 +3202,7 @@ int do_sigaction(int sig, struct k_sigac |
| return 0; |
| } |
| |
| -static int |
| +static int |
| do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
| { |
| stack_t oss; |
| @@ -3270,7 +3346,7 @@ int __compat_save_altstack(compat_stack_ |
| */ |
| SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
| { |
| - return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); |
| + return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); |
| } |
| |
| #endif |
| @@ -3395,7 +3471,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int |
| ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
| if (!ret && oact) { |
| sigset_to_compat(&mask, &old_ka.sa.sa_mask); |
| - ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
| + ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
| &oact->sa_handler); |
| ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask)); |
| ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
| @@ -3571,7 +3647,7 @@ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t |
| return -EFAULT; |
| return sigsuspend(&newset); |
| } |
| - |
| + |
| #ifdef CONFIG_COMPAT |
| COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) |
| { |