| From: Anna-Maria Gleixner <anna-maria@linutronix.de> |
| Date: Thu, 31 Aug 2017 11:03:08 +0000 |
| Subject: [PATCH 16/25] hrtimer: Allow function reuse for softirq based hrtimer |
| |
| The softirq based hrtimer can utilize most of the existing hrtimer |
| functions, but needs to operate on a different data set. Add an active_mask |
| argument to various functions so the hard and soft bases can be |
| selected. Fixup the existing callers and hand in the ACTIVE_HARD mask. |
| |
| Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| kernel/time/hrtimer.c | 20 +++++++++++++++----- |
| 1 file changed, 15 insertions(+), 5 deletions(-) |
| |
| --- a/kernel/time/hrtimer.c |
| +++ b/kernel/time/hrtimer.c |
| @@ -71,6 +71,14 @@ |
| #define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK) |
| |
| /* |
| + * Masks for selecting the soft and hard context timers from |
| + * cpu_base->active |
| + */ |
| +#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT) |
| +#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1) |
| +#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT) |
| + |
| +/* |
| * The timer bases: |
| * |
| * There are more clockids than hrtimer bases. Thus, we index |
| @@ -526,11 +534,12 @@ static ktime_t __hrtimer_next_event_base |
| |
| static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) |
| { |
| - unsigned int active = cpu_base->active_bases; |
| + unsigned int active; |
| ktime_t expires_next = KTIME_MAX; |
| |
| hrtimer_update_next_timer(cpu_base, NULL); |
| |
| + active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; |
| expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next); |
| |
| return expires_next; |
| @@ -1263,9 +1272,10 @@ static void __run_hrtimer(struct hrtimer |
| base->running = NULL; |
| } |
| |
| -static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) |
| +static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, |
| + unsigned int active_mask) |
| { |
| - unsigned int active = cpu_base->active_bases; |
| + unsigned int active = cpu_base->active_bases & active_mask; |
| |
| while (active) { |
| unsigned int id = __ffs(active); |
| @@ -1332,7 +1342,7 @@ void hrtimer_interrupt(struct clock_even |
| */ |
| cpu_base->expires_next = KTIME_MAX; |
| |
| - __hrtimer_run_queues(cpu_base, now); |
| + __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD); |
| |
| /* Reevaluate the clock bases for the next expiry */ |
| expires_next = __hrtimer_get_next_event(cpu_base); |
| @@ -1437,7 +1447,7 @@ void hrtimer_run_queues(void) |
| |
| raw_spin_lock(&cpu_base->lock); |
| now = hrtimer_update_base(cpu_base); |
| - __hrtimer_run_queues(cpu_base, now); |
| + __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD); |
| raw_spin_unlock(&cpu_base->lock); |
| } |
| |