| From 27d5ac5bb860c13a3247bbb9cd9781f7d54205dc Mon Sep 17 00:00:00 2001 |
| From: Linus Torvalds <torvalds@linux-foundation.org> |
| Date: Fri, 15 Oct 2010 11:09:28 -0700 |
| Subject: [PATCH] De-pessimize rds_page_copy_user |
| |
| commit 799c10559d60f159ab2232203f222f18fa3c4a5f upstream. |
| |
| Don't try to "optimize" rds_page_copy_user() by using kmap_atomic() and |
| the unsafe atomic user mode accessor functions. It's actually slower |
| than the straightforward code on any reasonable modern CPU. |
| |
| Back when the code was written (though probably not by the time it was |
| actually merged), 32-bit x86 may have been the dominant |
| architecture. And there kmap_atomic() can be a lot faster than kmap() |
| (unless you have very good locality, in which case the virtual address |
| caching by kmap() can overcome all the downsides). |
| |
| But these days, x86-64 may not be more populous, but it's getting there |
| (and if you care about performance, it's definitely already there - |
| you'd have upgraded your CPUs already in the last few years). And on |
| x86-64, the non-kmap_atomic() version is faster, simply because the code |
| is simpler and doesn't have the "re-try page fault" case. |
| |
| People with old hardware are not likely to care about RDS anyway, and |
| the optimization for the 32-bit case is simply buggy, since it doesn't |
| verify the user addresses properly. |
| |
| Reported-by: Dan Rosenberg <drosenberg@vsecurity.com> |
| Acked-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
| --- |
| net/rds/page.c | 27 +++++++-------------------- |
| 1 files changed, 7 insertions(+), 20 deletions(-) |
| |
| diff --git a/net/rds/page.c b/net/rds/page.c |
| index 595a952..1dfbfea 100644 |
| --- a/net/rds/page.c |
| +++ b/net/rds/page.c |
| @@ -57,30 +57,17 @@ int rds_page_copy_user(struct page *page, unsigned long offset, |
| unsigned long ret; |
| void *addr; |
| |
| - if (to_user) |
| + addr = kmap(page); |
| + if (to_user) { |
| rds_stats_add(s_copy_to_user, bytes); |
| - else |
| + ret = copy_to_user(ptr, addr + offset, bytes); |
| + } else { |
| rds_stats_add(s_copy_from_user, bytes); |
| - |
| - addr = kmap_atomic(page, KM_USER0); |
| - if (to_user) |
| - ret = __copy_to_user_inatomic(ptr, addr + offset, bytes); |
| - else |
| - ret = __copy_from_user_inatomic(addr + offset, ptr, bytes); |
| - kunmap_atomic(addr, KM_USER0); |
| - |
| - if (ret) { |
| - addr = kmap(page); |
| - if (to_user) |
| - ret = copy_to_user(ptr, addr + offset, bytes); |
| - else |
| - ret = copy_from_user(addr + offset, ptr, bytes); |
| - kunmap(page); |
| - if (ret) |
| - return -EFAULT; |
| + ret = copy_from_user(addr + offset, ptr, bytes); |
| } |
| + kunmap(page); |
| |
| - return 0; |
| + return ret ? -EFAULT : 0; |
| } |
| EXPORT_SYMBOL_GPL(rds_page_copy_user); |
| |
| -- |
| 1.7.0.4 |
| |