| From 68aa8efcd1ab961e4684ef5af32f72a6ec1911de Mon Sep 17 00:00:00 2001 |
| From: Stanislaw Gruszka <sgruszka@redhat.com> |
| Date: Tue, 30 Apr 2013 11:35:06 +0200 |
| Subject: sched: Avoid prev->stime underflow |
| |
| From: Stanislaw Gruszka <sgruszka@redhat.com> |
| |
| commit 68aa8efcd1ab961e4684ef5af32f72a6ec1911de upstream. |
| |
| Dave Hansen reported strange utime/stime values on his system: |
| https://lkml.org/lkml/2013/4/4/435 |
| |
| This happens because the prev->stime value is bigger than the |
| rtime value. The root of the problem is non-monotonic rtime |
| values (i.e. the current rtime is smaller than the previous |
| rtime), and that should be debugged and fixed. |
| |
| But since the problem did not manifest itself before commit |
| 62188451f0d63add7ad0cd2a1ae269d600c1663d "cputime: Avoid |
| multiplication overflow on utime scaling", it should be treated |
| as a regression, which we can easily fix in the |
| cputime_adjust() function. |
| |
| For now, let's apply this fix, but further work is needed to fix |
| the root of the problem. |
| |
| Reported-and-tested-by: Dave Hansen <dave@sr71.net> |
| Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com> |
| Cc: Frederic Weisbecker <fweisbec@gmail.com> |
| Cc: rostedt@goodmis.org |
| Cc: Linus Torvalds <torvalds@linux-foundation.org> |
| Cc: Dave Hansen <dave@sr71.net> |
| Cc: Peter Zijlstra <peterz@infradead.org> |
| Link: http://lkml.kernel.org/r/1367314507-9728-3-git-send-email-sgruszka@redhat.com |
| Signed-off-by: Ingo Molnar <mingo@kernel.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| kernel/sched/cputime.c | 14 +++++++------- |
| 1 file changed, 7 insertions(+), 7 deletions(-) |
| |
| --- a/kernel/sched/cputime.c |
| +++ b/kernel/sched/cputime.c |
| @@ -574,7 +574,7 @@ static void cputime_adjust(struct task_c |
| struct cputime *prev, |
| cputime_t *ut, cputime_t *st) |
| { |
| - cputime_t rtime, stime, total; |
| + cputime_t rtime, stime, utime, total; |
| |
| stime = curr->stime; |
| total = stime + curr->utime; |
| @@ -599,13 +599,13 @@ static void cputime_adjust(struct task_c |
| if (prev->stime + prev->utime >= rtime) |
| goto out; |
| |
| - if (!rtime) { |
| - stime = 0; |
| - } else if (!total) { |
| - stime = rtime; |
| - } else { |
| + if (total) { |
| stime = scale_stime((__force u64)stime, |
| (__force u64)rtime, (__force u64)total); |
| + utime = rtime - stime; |
| + } else { |
| + stime = rtime; |
| + utime = 0; |
| } |
| |
| /* |
| @@ -614,7 +614,7 @@ static void cputime_adjust(struct task_c |
| * Let's enforce monotonicity. |
| */ |
| prev->stime = max(prev->stime, stime); |
| - prev->utime = max(prev->utime, rtime - prev->stime); |
| + prev->utime = max(prev->utime, utime); |
| |
| out: |
| *ut = prev->utime; |