patches-3.8.11-rt8.tar.xz

md5sum:
91f4fd80e240bb23d7a0ba809e4c9803  patches-3.8.11-rt8.tar.xz

Announce:
 --------------
 Dear RT Folks,

 I'm pleased to announce the 3.8.11-rt8 release.

 changes since v3.8.11-rt7:
 - a suspend / resume related issue has been fixed
 - don't wake up blocked workers (Steven Rostedt)

 Known issues:

     - SLxB is broken on PowerPC e500 based CPUs. The problem cannot be
       reproduced on an mpc5200 based board.

 The delta patch against v3.8.11-rt7 is appended below and can be found here:

   https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/incr/patch-3.8.11-rt7-rt8.patch.xz

 The RT patch against 3.8.11 can be found here:

   https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patch-3.8.11-rt8.patch.xz

 The split quilt queue is available at:

   https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patches-3.8.11-rt8.tar.xz

 Sebastian
 --------------

http://marc.info/?l=linux-rt-users&m=136783593912843&w=2

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index a9d6c3a..e125de5 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -59,7 +59,7 @@
  struct mm_struct;
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3489,10 +3489,10 @@ void complete(struct completion *x)
+@@ -3491,10 +3491,10 @@ void complete(struct completion *x)
  {
  	unsigned long flags;
  
@@ -73,7 +73,7 @@
  }
  EXPORT_SYMBOL(complete);
  
-@@ -3509,10 +3509,10 @@ void complete_all(struct completion *x)
+@@ -3511,10 +3511,10 @@ void complete_all(struct completion *x)
  {
  	unsigned long flags;
  
@@ -87,7 +87,7 @@
  }
  EXPORT_SYMBOL(complete_all);
  
-@@ -3520,20 +3520,20 @@ static inline long __sched
+@@ -3522,20 +3522,20 @@ static inline long __sched
  do_wait_for_common(struct completion *x, long timeout, int state)
  {
  	if (!x->done) {
@@ -113,7 +113,7 @@
  		if (!x->done)
  			return timeout;
  	}
-@@ -3546,9 +3546,9 @@ wait_for_common(struct completion *x, lo
+@@ -3548,9 +3548,9 @@ wait_for_common(struct completion *x, lo
  {
  	might_sleep();
  
@@ -125,7 +125,7 @@
  	return timeout;
  }
  
-@@ -3679,12 +3679,12 @@ bool try_wait_for_completion(struct comp
+@@ -3681,12 +3681,12 @@ bool try_wait_for_completion(struct comp
  	unsigned long flags;
  	int ret = 1;
  
@@ -140,7 +140,7 @@
  	return ret;
  }
  EXPORT_SYMBOL(try_wait_for_completion);
-@@ -3702,10 +3702,10 @@ bool completion_done(struct completion *
+@@ -3704,10 +3704,10 @@ bool completion_done(struct completion *
  	unsigned long flags;
  	int ret = 1;
  
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 54850a6..6e8e85f 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -461,7 +461,7 @@
  #ifdef CONFIG_SCHED_DEBUG
  		p->migrate_disable_atomic--;
  #endif
-@@ -4874,6 +4874,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -4876,6 +4876,84 @@ void do_set_cpus_allowed(struct task_str
  	cpumask_copy(&p->cpus_allowed, new_mask);
  }
  
diff --git a/patches/idle-state.patch b/patches/idle-state.patch
index 1958a69..5572cbe 100644
--- a/patches/idle-state.patch
+++ b/patches/idle-state.patch
@@ -9,7 +9,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4943,6 +4943,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4945,6 +4945,7 @@ void __cpuinit init_idle(struct task_str
  	rcu_read_unlock();
  
  	rq->curr = rq->idle = idle;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 5dacefd..f2f4c46 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@
 --- /dev/null
 +++ b/localversion-rt
 @@ -0,0 +1 @@
-+-rt7
++-rt8
diff --git a/patches/might-sleep-check-for-idle.patch b/patches/might-sleep-check-for-idle.patch
index ffa6ad0..9318cec 100644
--- a/patches/might-sleep-check-for-idle.patch
+++ b/patches/might-sleep-check-for-idle.patch
@@ -11,7 +11,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -7393,7 +7393,8 @@ void __might_sleep(const char *file, int
+@@ -7395,7 +7395,8 @@ void __might_sleep(const char *file, int
  	static unsigned long prev_jiffy;	/* ratelimiting */
  
  	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
diff --git a/patches/peter_zijlstra-frob-migrate_disable-2.patch b/patches/peter_zijlstra-frob-migrate_disable-2.patch
index 4409f5a..f996570 100644
--- a/patches/peter_zijlstra-frob-migrate_disable-2.patch
+++ b/patches/peter_zijlstra-frob-migrate_disable-2.patch
@@ -116,7 +116,7 @@
   * Callback to arch code if there's nosmp or maxcpus=0 on the
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4736,7 +4736,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4738,7 +4738,7 @@ void __cpuinit init_idle(struct task_str
  #ifdef CONFIG_SMP
  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  {
@@ -125,7 +125,7 @@
  		if (p->sched_class && p->sched_class->set_cpus_allowed)
  			p->sched_class->set_cpus_allowed(p, new_mask);
  		p->nr_cpus_allowed = cpumask_weight(new_mask);
-@@ -4792,7 +4792,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -4794,7 +4794,7 @@ int set_cpus_allowed_ptr(struct task_str
  	do_set_cpus_allowed(p, new_mask);
  
  	/* Can the task run on the task's current CPU? If so, we're done */
@@ -134,7 +134,7 @@
  		goto out;
  
  	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4811,6 +4811,7 @@ out:
+@@ -4813,6 +4813,7 @@ out:
  }
  EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  
@@ -142,7 +142,7 @@
  void migrate_disable(void)
  {
  	struct task_struct *p = current;
-@@ -4903,6 +4904,7 @@ void migrate_enable(void)
+@@ -4905,6 +4906,7 @@ void migrate_enable(void)
  	preempt_enable();
  }
  EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/peter_zijlstra-frob-migrate_disable.patch b/patches/peter_zijlstra-frob-migrate_disable.patch
index 247a5d3..2490ffb 100644
--- a/patches/peter_zijlstra-frob-migrate_disable.patch
+++ b/patches/peter_zijlstra-frob-migrate_disable.patch
@@ -13,7 +13,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4831,7 +4831,19 @@ void migrate_disable(void)
+@@ -4833,7 +4833,19 @@ void migrate_disable(void)
  		preempt_enable();
  		return;
  	}
@@ -34,7 +34,7 @@
  	p->migrate_disable = 1;
  	mask = tsk_cpus_allowed(p);
  
-@@ -4842,7 +4854,7 @@ void migrate_disable(void)
+@@ -4844,7 +4856,7 @@ void migrate_disable(void)
  			p->sched_class->set_cpus_allowed(p, mask);
  		p->nr_cpus_allowed = cpumask_weight(mask);
  	}
@@ -43,7 +43,7 @@
  	preempt_enable();
  }
  EXPORT_SYMBOL(migrate_disable);
-@@ -4870,7 +4882,11 @@ void migrate_enable(void)
+@@ -4872,7 +4884,11 @@ void migrate_enable(void)
  		return;
  	}
  
@@ -56,7 +56,7 @@
  	p->migrate_disable = 0;
  	mask = tsk_cpus_allowed(p);
  
-@@ -4882,7 +4898,7 @@ void migrate_enable(void)
+@@ -4884,7 +4900,7 @@ void migrate_enable(void)
  		p->nr_cpus_allowed = cpumask_weight(mask);
  	}
  
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 463e487..b7755a9 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -310,7 +310,7 @@
  }
  EXPORT_SYMBOL(migrate_enable);
  #else
-@@ -3117,6 +3164,7 @@ need_resched:
+@@ -3119,6 +3166,7 @@ need_resched:
  	put_prev_task(rq, prev);
  	next = pick_next_task(rq);
  	clear_tsk_need_resched(prev);
@@ -318,7 +318,7 @@
  	rq->skip_clock_update = 0;
  
  	if (likely(prev != next)) {
-@@ -3253,6 +3301,14 @@ asmlinkage void __sched notrace preempt_
+@@ -3255,6 +3303,14 @@ asmlinkage void __sched notrace preempt_
  	if (likely(ti->preempt_count || irqs_disabled()))
  		return;
  
@@ -333,7 +333,7 @@
  	do {
  		add_preempt_count_notrace(PREEMPT_ACTIVE);
  		/*
-@@ -4864,7 +4920,9 @@ void __cpuinit init_idle(struct task_str
+@@ -4866,7 +4922,9 @@ void __cpuinit init_idle(struct task_str
  
  	/* Set the preempt count _outside_ the spinlocks! */
  	task_thread_info(idle)->preempt_count = 0;
diff --git a/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch b/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
index 0f9ae12..aba9aa7 100644
--- a/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
+++ b/patches/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
@@ -169,7 +169,7 @@
  	switch_count = &prev->nivcsw;
  	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  		if (unlikely(signal_pending_state(prev->state, prev))) {
-@@ -4736,7 +4867,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4738,7 +4869,7 @@ void __cpuinit init_idle(struct task_str
  #ifdef CONFIG_SMP
  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  {
@@ -178,7 +178,7 @@
  		if (p->sched_class && p->sched_class->set_cpus_allowed)
  			p->sched_class->set_cpus_allowed(p, new_mask);
  		p->nr_cpus_allowed = cpumask_weight(new_mask);
-@@ -4811,124 +4942,6 @@ out:
+@@ -4813,124 +4944,6 @@ out:
  }
  EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  
diff --git a/patches/sched-adjust-reset-on-fork-always.patch b/patches/sched-adjust-reset-on-fork-always.patch
index b9379be..8476424 100644
--- a/patches/sched-adjust-reset-on-fork-always.patch
+++ b/patches/sched-adjust-reset-on-fork-always.patch
@@ -14,7 +14,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4119,10 +4119,13 @@ recheck:
+@@ -4121,10 +4121,13 @@ recheck:
  	}
  
  	/*
diff --git a/patches/sched-better-debug-output-for-might-sleep.patch b/patches/sched-better-debug-output-for-might-sleep.patch
index 3268af3..4f648bb 100644
--- a/patches/sched-better-debug-output-for-might-sleep.patch
+++ b/patches/sched-better-debug-output-for-might-sleep.patch
@@ -55,7 +55,7 @@
  	dump_stack();
  	add_taint(TAINT_WARN);
  }
-@@ -7310,6 +7322,13 @@ void __might_sleep(const char *file, int
+@@ -7312,6 +7324,13 @@ void __might_sleep(const char *file, int
  	debug_show_held_locks(current);
  	if (irqs_disabled())
  		print_irqtrace_events(current);
diff --git a/patches/sched-consider-pi-boosting-in-setscheduler.patch b/patches/sched-consider-pi-boosting-in-setscheduler.patch
index a482540..754aa69 100644
--- a/patches/sched-consider-pi-boosting-in-setscheduler.patch
+++ b/patches/sched-consider-pi-boosting-in-setscheduler.patch
@@ -67,7 +67,7 @@
   * This can be both boosting and unboosting. task->pi_lock must be held.
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3766,7 +3766,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -3768,7 +3768,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
   * This function changes the 'effective' priority of a task. It does
   * not touch ->normal_prio like __setscheduler().
   *
@@ -77,7 +77,7 @@
   */
  void rt_mutex_setprio(struct task_struct *p, int prio)
  {
-@@ -3989,20 +3990,25 @@ static struct task_struct *find_process_
+@@ -3991,20 +3992,25 @@ static struct task_struct *find_process_
  	return pid ? find_task_by_vpid(pid) : current;
  }
  
@@ -107,7 +107,7 @@
  }
  
  /*
-@@ -4024,6 +4030,7 @@ static bool check_same_owner(struct task
+@@ -4026,6 +4032,7 @@ static bool check_same_owner(struct task
  static int __sched_setscheduler(struct task_struct *p, int policy,
  				const struct sched_param *param, bool user)
  {
@@ -115,7 +115,7 @@
  	int retval, oldprio, oldpolicy = -1, on_rq, running;
  	unsigned long flags;
  	const struct sched_class *prev_class;
-@@ -4151,6 +4158,25 @@ recheck:
+@@ -4153,6 +4160,25 @@ recheck:
  		task_rq_unlock(rq, p, &flags);
  		goto recheck;
  	}
@@ -141,7 +141,7 @@
  	on_rq = p->on_rq;
  	running = task_current(rq, p);
  	if (on_rq)
-@@ -4158,9 +4184,6 @@ recheck:
+@@ -4160,9 +4186,6 @@ recheck:
  	if (running)
  		p->sched_class->put_prev_task(rq, p);
  
@@ -151,7 +151,7 @@
  	prev_class = p->sched_class;
  	__setscheduler(rq, p, policy, param->sched_priority);
  
-@@ -4173,7 +4196,6 @@ recheck:
+@@ -4175,7 +4198,6 @@ recheck:
  		 */
  		enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
  	}
diff --git a/patches/sched-enqueue-to-head.patch b/patches/sched-enqueue-to-head.patch
index 4ca9985..b69ba00 100644
--- a/patches/sched-enqueue-to-head.patch
+++ b/patches/sched-enqueue-to-head.patch
@@ -50,7 +50,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4166,8 +4166,13 @@ recheck:
+@@ -4168,8 +4168,13 @@ recheck:
  
  	if (running)
  		p->sched_class->set_curr_task(rq);
diff --git a/patches/sched-migrate-disable.patch b/patches/sched-migrate-disable.patch
index f701a3b..35e76e3 100644
--- a/patches/sched-migrate-disable.patch
+++ b/patches/sched-migrate-disable.patch
@@ -65,7 +65,7 @@
  
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4736,11 +4736,12 @@ void __cpuinit init_idle(struct task_str
+@@ -4738,11 +4738,12 @@ void __cpuinit init_idle(struct task_str
  #ifdef CONFIG_SMP
  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  {
@@ -82,7 +82,7 @@
  }
  
  /*
-@@ -4791,7 +4792,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -4793,7 +4794,7 @@ int set_cpus_allowed_ptr(struct task_str
  	do_set_cpus_allowed(p, new_mask);
  
  	/* Can the task run on the task's current CPU? If so, we're done */
@@ -91,7 +91,7 @@
  		goto out;
  
  	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4810,6 +4811,83 @@ out:
+@@ -4812,6 +4813,83 @@ out:
  }
  EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  
diff --git a/patches/sched-rt-fix-migrate_enable-thinko.patch b/patches/sched-rt-fix-migrate_enable-thinko.patch
index 66982b4..b07597a 100644
--- a/patches/sched-rt-fix-migrate_enable-thinko.patch
+++ b/patches/sched-rt-fix-migrate_enable-thinko.patch
@@ -22,7 +22,7 @@
 
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4888,12 +4888,14 @@ void migrate_enable(void)
+@@ -4890,12 +4890,14 @@ void migrate_enable(void)
  	 */
  	rq = this_rq();
  	raw_spin_lock_irqsave(&rq->lock, flags);
diff --git a/patches/sched-teach-migrate_disable-about-atomic-contexts.patch b/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
index 3e57a8b..8672146 100644
--- a/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
+++ b/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
@@ -51,7 +51,7 @@
  	cpumask_t cpus_allowed;
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -4819,6 +4819,17 @@ void migrate_disable(void)
+@@ -4821,6 +4821,17 @@ void migrate_disable(void)
  	unsigned long flags;
  	struct rq *rq;
  
@@ -69,7 +69,7 @@
  	preempt_disable();
  	if (p->migrate_disable) {
  		p->migrate_disable++;
-@@ -4867,6 +4878,16 @@ void migrate_enable(void)
+@@ -4869,6 +4880,16 @@ void migrate_enable(void)
  	unsigned long flags;
  	struct rq *rq;
  
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
new file mode 100644
index 0000000..0907317
--- /dev/null
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -0,0 +1,39 @@
+From b24ee416f22bd2a2325b8f6afa5a4065dd3560e9 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 18 Mar 2013 15:12:49 -0400
+Subject: [PATCH] sched/workqueue: Only wake up idle workers if not blocked on
+ sleeping spin lock
+
+In -rt, most spin_locks() turn into mutexes. One of these spin_lock
+conversions is performed on the workqueue gcwq->lock. When the idle
+worker is woken, the first thing it will do is grab that same lock and
+it too will block, possibly jumping into the same code, but because
+nr_running would already be decremented it prevents an infinite loop.
+
+But this is still a waste of CPU cycles, and it doesn't follow the method
+of mainline, as new workers should only be woken when a worker thread is
+truly going to sleep, and not just blocked on a spin_lock().
+
+Check the saved_state too before waking up new workers.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2955,8 +2955,10 @@ need_resched:
+ 			 * If a worker went to sleep, notify and ask workqueue
+ 			 * whether it wants to wake up a task to maintain
+ 			 * concurrency.
++			 * Only call wake up if prev isn't blocked on a sleeping
++			 * spin lock.
+ 			 */
+-			if (prev->flags & PF_WQ_WORKER) {
++			if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
+ 				struct task_struct *to_wakeup;
+ 
+ 				to_wakeup = wq_worker_sleeping(prev, cpu);
diff --git a/patches/series b/patches/series
index 53490eb..aeb82d3 100644
--- a/patches/series
+++ b/patches/series
@@ -73,6 +73,7 @@
 timekeeping-move-lock-out-of-timekeeper.patch
 timekeeping-split-timekeeper-lock.patch
 timekeeping-store-cycle-last-in-timekeeper.patch
+time-timekeeping-shadow-tk-cycle_last-together-with-.patch
 timekeeping-delay-clock-cycle-last-update.patch
 timekeeping-implement-shadow-timekeeper.patch
 timekeeping-shorten-seq-count-region.patch
@@ -341,6 +342,7 @@
 sched-disable-ttwu-queue.patch
 sched-disable-rt-group-sched-on-rt.patch
 sched-ttwu-ensure-success-return-is-correct.patch
+sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
 
 # STOP MACHINE
 stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
diff --git a/patches/time-timekeeping-shadow-tk-cycle_last-together-with-.patch b/patches/time-timekeeping-shadow-tk-cycle_last-together-with-.patch
new file mode 100644
index 0000000..4de0530
--- /dev/null
+++ b/patches/time-timekeeping-shadow-tk-cycle_last-together-with-.patch
@@ -0,0 +1,27 @@
+From c27eb2e0ab0b5acd96a4b62288976f1b72789b3e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 30 Apr 2013 18:53:55 +0200
+Subject: [PATCH] time/timekeeping: shadow tk->cycle_last together with
+ clock->cycle_last
+
+Commit ("timekeeping: Store cycle_last value in timekeeper struct as
+well") introduced a tk-> based cycle_last values which needs to be reset
+on resume path as well or else ktime_get() will think that time
+increased.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/timekeeping.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -770,7 +770,7 @@ static void timekeeping_resume(void)
+ 		__timekeeping_inject_sleeptime(tk, &ts);
+ 	}
+ 	/* re-base the last cycle value */
+-	tk->clock->cycle_last = tk->clock->read(tk->clock);
++	tk->cycle_last = tk->clock->cycle_last = tk->clock->read(tk->clock);
+ 	tk->ntp_error = 0;
+ 	timekeeping_suspended = 0;
+ 	timekeeping_update(tk, false);
diff --git a/patches/timekeeping-implement-shadow-timekeeper.patch b/patches/timekeeping-implement-shadow-timekeeper.patch
index 242aa22..d692800 100644
--- a/patches/timekeeping-implement-shadow-timekeeper.patch
+++ b/patches/timekeeping-implement-shadow-timekeeper.patch
@@ -91,7 +91,7 @@
  	write_seqcount_end(&timekeeper_seq);
  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 @@ -773,7 +779,7 @@ static void timekeeping_resume(void)
- 	tk->clock->cycle_last = tk->clock->read(tk->clock);
+ 	tk->cycle_last = tk->clock->cycle_last = tk->clock->read(tk->clock);
  	tk->ntp_error = 0;
  	timekeeping_suspended = 0;
 -	timekeeping_update(tk, false);