| From e9d6c157385e4efa61cb8293e425c9d8beba70d3 Mon Sep 17 00:00:00 2001 |
| From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> |
| Date: Mon, 24 May 2010 14:31:48 -0700 |
| Subject: tmpfs: insert tmpfs cache pages to inactive list at first |
| |
| From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> |
| |
| commit e9d6c157385e4efa61cb8293e425c9d8beba70d3 upstream. |
| |
| Shaohua Li reported parallel file copy on tmpfs can lead to OOM killer. |
| This is a regression caused by commit 9ff473b9a7 ("vmscan: evict |
| streaming IO first"). Wow, it is a 2-year-old patch! |
| |
| Currently, tmpfs file cache is inserted into the active list at first. |
| This means that the insertion not only increases the number of pages in |
| the anon LRU, but also reduces the anon scanning ratio. Therefore, |
| vmscan will get totally confused. It scans almost only the file LRU |
| even though the system has plenty of unused tmpfs pages. |
| |
| Historically, lru_cache_add_active_anon() was used for two reasons. |
| 1) Intended to prioritize shmem pages over regular file cache. |
| 2) Intended to avoid reclaim priority inversion of used-once pages. |
| |
| But we've lost both motivations because (1) now we have separate anon |
| and file LRU lists, so inserting into the active list doesn't provide |
| such prioritization; and (2) in the past, one pte access bit would |
| cause page activation, so inserting into the inactive list with the pte |
| access bit set meant higher priority than inserting into the active |
| list. That priority inversion could lead to unintended lru churn, but |
| it was already solved by commit 645747462 (vmscan: detect mapped file |
| pages used only once). (Thanks Hannes, you are great!) |
| |
| Thus, now we can use lru_cache_add_anon() instead. |
| |
| Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> |
| Reported-by: Shaohua Li <shaohua.li@intel.com> |
| Reviewed-by: Wu Fengguang <fengguang.wu@intel.com> |
| Reviewed-by: Johannes Weiner <hannes@cmpxchg.org> |
| Reviewed-by: Rik van Riel <riel@redhat.com> |
| Reviewed-by: Minchan Kim <minchan.kim@gmail.com> |
| Acked-by: Hugh Dickins <hughd@google.com> |
| Cc: Henrique de Moraes Holschuh <hmh@hmh.eng.br> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| include/linux/swap.h | 10 ---------- |
| mm/filemap.c | 4 ++-- |
| 2 files changed, 2 insertions(+), 12 deletions(-) |
| |
| --- a/include/linux/swap.h |
| +++ b/include/linux/swap.h |
| @@ -218,21 +218,11 @@ static inline void lru_cache_add_anon(st |
| __lru_cache_add(page, LRU_INACTIVE_ANON); |
| } |
| |
| -static inline void lru_cache_add_active_anon(struct page *page) |
| -{ |
| - __lru_cache_add(page, LRU_ACTIVE_ANON); |
| -} |
| - |
| static inline void lru_cache_add_file(struct page *page) |
| { |
| __lru_cache_add(page, LRU_INACTIVE_FILE); |
| } |
| |
| -static inline void lru_cache_add_active_file(struct page *page) |
| -{ |
| - __lru_cache_add(page, LRU_ACTIVE_FILE); |
| -} |
| - |
| /* linux/mm/vmscan.c */ |
| extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
| gfp_t gfp_mask, nodemask_t *mask); |
| --- a/mm/filemap.c |
| +++ b/mm/filemap.c |
| @@ -462,7 +462,7 @@ int add_to_page_cache_lru(struct page *p |
| /* |
| * Splice_read and readahead add shmem/tmpfs pages into the page cache |
| * before shmem_readpage has a chance to mark them as SwapBacked: they |
| - * need to go on the active_anon lru below, and mem_cgroup_cache_charge |
| + * need to go on the anon lru below, and mem_cgroup_cache_charge |
| * (called in add_to_page_cache) needs to know where they're going too. |
| */ |
| if (mapping_cap_swap_backed(mapping)) |
| @@ -473,7 +473,7 @@ int add_to_page_cache_lru(struct page *p |
| if (page_is_file_cache(page)) |
| lru_cache_add_file(page); |
| else |
| - lru_cache_add_active_anon(page); |
| + lru_cache_add_anon(page); |
| } |
| return ret; |
| } |