| From: Daniel Wagner <daniel.wagner@bmw-carit.de> |
| Date: Fri, 11 Jul 2014 15:26:11 +0200 |
| Subject: work-simple: Simple work queue implementation |
| |
| Provides a framework for enqueuing callbacks from IRQ context that is |
| safe on PREEMPT_RT_FULL. The callbacks are executed in kthread context. |
| |
| Based on wait-simple. |
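| |
| A minimal usage sketch (my_dev, my_dev_work and the surrounding |
| driver code are illustrative only, not part of this patch): |
| |
|   struct my_dev { |
|           struct swork_event event; |
|   }; |
| |
|   /* runs in kthread context and may therefore sleep */ |
|   static void my_dev_work(struct swork_event *sev) |
|   { |
|           struct my_dev *mydev = container_of(sev, struct my_dev, event); |
| |
|           /* ... process mydev ... */ |
|   } |
| |
|   /* setup: take a reference on the worker, init the event */ |
|   err = swork_get(); |
|   INIT_SWORK(&mydev->event, my_dev_work); |
| |
|   /* from hard IRQ context, safe on PREEMPT_RT_FULL */ |
|   swork_queue(&mydev->event); |
| |
|   /* teardown, once all queued events have completed */ |
|   swork_put(); |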
| |
| Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de> |
| --- |
| include/linux/swork.h | 24 ++++++ |
| kernel/sched/Makefile | 2 |
| kernel/sched/swork.c | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++ |
| 3 files changed, 198 insertions(+), 1 deletion(-) |
| |
| --- /dev/null |
| +++ b/include/linux/swork.h |
| @@ -0,0 +1,24 @@ |
| +#ifndef _LINUX_SWORK_H |
| +#define _LINUX_SWORK_H |
| + |
| +#include <linux/list.h> |
| + |
| +struct swork_event { |
| + struct list_head item; |
| + unsigned long flags; |
| + void (*func)(struct swork_event *); |
| +}; |
| + |
| +static inline void INIT_SWORK(struct swork_event *event, |
| + void (*func)(struct swork_event *)) |
| +{ |
| + event->flags = 0; |
| + event->func = func; |
| +} |
| + |
| +bool swork_queue(struct swork_event *sev); |
| + |
| +int swork_get(void); |
| +void swork_put(void); |
| + |
| +#endif /* _LINUX_SWORK_H */ |
| --- a/kernel/sched/Makefile |
| +++ b/kernel/sched/Makefile |
| @@ -17,7 +17,7 @@ endif |
| |
| obj-y += core.o loadavg.o clock.o cputime.o |
| obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o |
| -obj-y += wait.o swait.o completion.o idle.o |
| +obj-y += wait.o swait.o swork.o completion.o idle.o |
| obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o |
| obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o |
| obj-$(CONFIG_SCHEDSTATS) += stats.o |
| --- /dev/null |
| +++ b/kernel/sched/swork.c |
| @@ -0,0 +1,173 @@ |
| +/* |
| + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de |
| + * |
| + * Provides a framework for enqueuing callbacks from IRQ context that is |
| + * safe on PREEMPT_RT_FULL. The callbacks are executed in kthread context. |
| + */ |
| + |
| +#include <linux/swait.h> |
| +#include <linux/swork.h> |
| +#include <linux/kthread.h> |
| +#include <linux/slab.h> |
| +#include <linux/spinlock.h> |
| +#include <linux/export.h> |
| + |
| +#define SWORK_EVENT_PENDING 0 /* bit number, used with test_and_set_bit() */ |
| + |
| +static DEFINE_MUTEX(worker_mutex); |
| +static struct sworker *glob_worker; |
| + |
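| +/* |
| + * The worker drains its event list from a dedicated kthread. The |
| + * lock is raw so that swork_queue() remains usable from hard IRQ |
| + * context even on PREEMPT_RT_FULL. |
| + */ |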
| +struct sworker { |
| + struct list_head events; |
| + struct swait_queue_head wq; |
| + |
| + raw_spinlock_t lock; |
| + |
| + struct task_struct *task; |
| + int refs; |
| +}; |
| + |
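| +/* |
| + * Wait condition for the worker thread: there is work pending, or |
| + * the thread was asked to stop, so kthread_stop() is not blocked |
| + * on an empty queue. |
| + */ |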
| +static bool swork_readable(struct sworker *worker) |
| +{ |
| + bool r; |
| + |
| + if (kthread_should_stop()) |
| + return true; |
| + |
| + raw_spin_lock_irq(&worker->lock); |
| + r = !list_empty(&worker->events); |
| + raw_spin_unlock_irq(&worker->lock); |
| + |
| + return r; |
| +} |
| + |
| +static int swork_kthread(void *arg) |
| +{ |
| + struct sworker *worker = arg; |
| + |
| + for (;;) { |
| + swait_event_interruptible(worker->wq, |
| + swork_readable(worker)); |
| + if (kthread_should_stop()) |
| + break; |
| + |
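| + /* |
| + * Drain all pending events. The lock is dropped around each |
| + * callback so the handler may sleep and requeue work. |
| + */ |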
| + raw_spin_lock_irq(&worker->lock); |
| + while (!list_empty(&worker->events)) { |
| + struct swork_event *sev; |
| + |
| + sev = list_first_entry(&worker->events, |
| + struct swork_event, item); |
| + list_del(&sev->item); |
| + raw_spin_unlock_irq(&worker->lock); |
| + |
| + WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, |
| + &sev->flags)); |
| + sev->func(sev); |
| + raw_spin_lock_irq(&worker->lock); |
| + } |
| + raw_spin_unlock_irq(&worker->lock); |
| + } |
| + return 0; |
| +} |
| + |
| +static struct sworker *swork_create(void) |
| +{ |
| + struct sworker *worker; |
| + |
| + worker = kzalloc(sizeof(*worker), GFP_KERNEL); |
| + if (!worker) |
| + return ERR_PTR(-ENOMEM); |
| + |
| + INIT_LIST_HEAD(&worker->events); |
| + raw_spin_lock_init(&worker->lock); |
| + init_swait_queue_head(&worker->wq); |
| + |
| + worker->task = kthread_run(swork_kthread, worker, "kswork"); |
| + if (IS_ERR(worker->task)) { |
| + long err = PTR_ERR(worker->task); |
| + |
| + kfree(worker); |
| + return ERR_PTR(err); |
| + } |
| + |
| + return worker; |
| +} |
| + |
| +static void swork_destroy(struct sworker *worker) |
| +{ |
| + kthread_stop(worker->task); |
| + |
| + WARN_ON(!list_empty(&worker->events)); |
| + kfree(worker); |
| +} |
| + |
| +/** |
| + * swork_queue - queue a swork event for processing |
| + * @sev: swork event to queue |
| + * |
| + * Returns %false if @sev was already queued, %true otherwise. |
| + * |
| + * The event is queued and later processed by the kswork kthread, |
| + * which may run on any CPU. |
| + */ |
| +bool swork_queue(struct swork_event *sev) |
| +{ |
| + unsigned long flags; |
| + |
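| + /* |
| + * The PENDING bit prevents double enqueue; the worker clears |
| + * it again right before it invokes the callback. |
| + */ |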
| + if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) |
| + return false; |
| + |
| + raw_spin_lock_irqsave(&glob_worker->lock, flags); |
| + list_add_tail(&sev->item, &glob_worker->events); |
| + raw_spin_unlock_irqrestore(&glob_worker->lock, flags); |
| + |
| + swake_up(&glob_worker->wq); |
| + return true; |
| +} |
| +EXPORT_SYMBOL_GPL(swork_queue); |
| + |
| +/** |
| + * swork_get - take a reference on the global sworker |
| + * |
| + * The worker thread is created on first use. Returns a negative |
| + * error code if the worker could not be initialized, %0 otherwise. |
| + */ |
| +int swork_get(void) |
| +{ |
| + struct sworker *worker; |
| + |
| + mutex_lock(&worker_mutex); |
| + if (!glob_worker) { |
| + worker = swork_create(); |
| + if (IS_ERR(worker)) { |
| + mutex_unlock(&worker_mutex); |
| + return PTR_ERR(worker); |
| + } |
| + |
| + glob_worker = worker; |
| + } |
| + |
| + glob_worker->refs++; |
| + mutex_unlock(&worker_mutex); |
| + |
| + return 0; |
| +} |
| +EXPORT_SYMBOL_GPL(swork_get); |
| + |
| +/** |
| + * swork_put - drop a reference on the global sworker |
| + * |
| + * The worker thread is destroyed once the last reference is dropped. |
| + * This function must not be called until all queued events have |
| + * completed. |
| + */ |
| +void swork_put(void) |
| +{ |
| + mutex_lock(&worker_mutex); |
| + |
| + glob_worker->refs--; |
| + if (glob_worker->refs > 0) |
| + goto out; |
| + |
| + swork_destroy(glob_worker); |
| + glob_worker = NULL; |
| +out: |
| + mutex_unlock(&worker_mutex); |
| +} |
| +EXPORT_SYMBOL_GPL(swork_put); |