| From 15653371c67c3fbe359ae37b720639dd4c7b42c5 Mon Sep 17 00:00:00 2001 |
| From: Russell King <rmk+kernel@arm.linux.org.uk> |
| Date: Sat, 19 Jan 2013 11:05:57 +0000 |
| Subject: ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem |
| |
| From: Russell King <rmk+kernel@arm.linux.org.uk> |
| |
| commit 15653371c67c3fbe359ae37b720639dd4c7b42c5 upstream. |
| |
| Subhash Jadavani reported this partial backtrace — a call stack from the |
| MMC block driver, observed on an ARMv7 based board: |
| |
| [<c001b50c>] (v7_dma_inv_range+0x30/0x48) from [<c0017b8c>] (dma_cache_maint_page+0x1c4/0x24c) |
| [<c0017b8c>] (dma_cache_maint_page+0x1c4/0x24c) from [<c0017c28>] (___dma_page_cpu_to_dev+0x14/0x1c) |
| [<c0017c28>] (___dma_page_cpu_to_dev+0x14/0x1c) from [<c0017ff8>] (dma_map_sg+0x3c/0x114) |
| |
| This is caused by incrementing the struct page pointer, and running off |
| the end of the sparsemem page array. Fix this by incrementing by pfn |
| instead, and convert the pfn to a struct page. |
| |
| Suggested-by: James Bottomley <JBottomley@Parallels.com> |
| Tested-by: Subhash Jadavani <subhashj@codeaurora.org> |
| Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| arch/arm/mm/dma-mapping.c | 18 ++++++++++-------- |
| 1 file changed, 10 insertions(+), 8 deletions(-) |
| |
| --- a/arch/arm/mm/dma-mapping.c |
| +++ b/arch/arm/mm/dma-mapping.c |
| @@ -503,25 +503,27 @@ static void dma_cache_maint_page(struct |
| size_t size, enum dma_data_direction dir, |
| void (*op)(const void *, size_t, int)) |
| { |
| + unsigned long pfn; |
| + size_t left = size; |
| + |
| + pfn = page_to_pfn(page) + offset / PAGE_SIZE; |
| + offset %= PAGE_SIZE; |
| + |
| /* |
| * A single sg entry may refer to multiple physically contiguous |
| * pages. But we still need to process highmem pages individually. |
| * If highmem is not configured then the bulk of this loop gets |
| * optimized out. |
| */ |
| - size_t left = size; |
| do { |
| size_t len = left; |
| void *vaddr; |
| |
| + page = pfn_to_page(pfn); |
| + |
| if (PageHighMem(page)) { |
| - if (len + offset > PAGE_SIZE) { |
| - if (offset >= PAGE_SIZE) { |
| - page += offset / PAGE_SIZE; |
| - offset %= PAGE_SIZE; |
| - } |
| + if (len + offset > PAGE_SIZE) |
| len = PAGE_SIZE - offset; |
| - } |
| vaddr = kmap_high_get(page); |
| if (vaddr) { |
| vaddr += offset; |
| @@ -538,7 +540,7 @@ static void dma_cache_maint_page(struct |
| op(vaddr, len, dir); |
| } |
| offset = 0; |
| - page++; |
| + pfn++; |
| left -= len; |
| } while (left); |
| } |