From 172f00812bbad3f1730c7718ee60d9d9978436b4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:33 -0500
Subject: [PATCH] x86: get user pages kmap atomic fix (32bit)
commit 6c8a72dd4bcd3576b6a2758a9ce7e82cb39461df in tip.
[PG: add paravirt_types.h change from big 33rt merge commit, then
delete paravirt parts that go away due to upstream dad52fc0116]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
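---
Note: in the branch of the highmem.h #if block where kmap_atomic() falls
back to the sleeping kmap()/kunmap_virt() path, the lockless fast path in
gup_pte_range() (which runs with interrupts disabled) can no longer use
kmap_atomic() to map the PTE page. The new *_direct helpers always take
the non-sleeping __kmap_atomic_prot() mapping, so the PTE page can be
mapped and unmapped without risking a sleep in that context.

A minimal usage sketch, not part of the patch itself (pmd and addr are
placeholder variables), showing how callers are expected to pair the new
helpers, as gup.c does below:

	pte_t *ptep = pte_offset_map_direct(&pmd, addr);
	pte_t pte = *ptep;	/* read the entry; the mapping cannot sleep here */
	pte_unmap_direct(ptep);

On 64-bit and non-highmem 32-bit builds the *_direct variants reduce to
pte_offset_kernel()/no-ops, so the sketch above generates the same code
as the plain pte_offset_map()/pte_unmap() pair did before.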
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8391a46..28f471b 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -64,6 +64,7 @@ void kunmap(struct page *page);
void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *__kmap_atomic(struct page *page, enum km_type type);
+void *__kmap_atomic_direct(struct page *page, enum km_type type);
void __kunmap_atomic(void *kvaddr, enum km_type type);
void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type);
struct page *__kmap_atomic_to_page(void *ptr);
@@ -76,6 +77,9 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
+#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
+#define kmap_atomic_pte_direct(page, type) kmap_atomic_direct(page, type)
+
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
@@ -90,12 +94,16 @@ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
# define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn))
# define kunmap_atomic(kvaddr, type) do { pagefault_enable(); kunmap_virt(kvaddr); } while(0)
# define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr)
+# define kmap_atomic_direct(page, type) __kmap_atomic_direct(page, type)
+# define kunmap_atomic_direct(kvaddr, type) __kunmap_atomic(kvaddr, type)
#else
# define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot)
# define kmap_atomic(page, type) __kmap_atomic(page, type)
# define kmap_atomic_pfn(pfn, type) __kmap_atomic_pfn(pfn, type)
# define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type)
# define kmap_atomic_to_page(kvaddr) __kmap_atomic_to_page(kvaddr)
+# define kmap_atomic_direct(page, type) __kmap_atomic(page, type)
+# define kunmap_atomic_direct(kvaddr, type) __kunmap_atomic(kvaddr, type)
#endif
#endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 47339a1..a350daa 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -59,14 +59,20 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
#define pte_offset_map_nested(dir, address) \
((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + \
pte_index((address)))
+#define pte_offset_map_direct(dir, address) \
+ ((pte_t *)kmap_atomic_pte_direct(pmd_page(*(dir)), __KM_PTE) + \
+ pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap_direct(pte) kunmap_atomic_direct((pte), __KM_PTE)
#else
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
+#define pte_offset_map_direct(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
+#define pte_unmap_direct(pte) do { } while (0)
#endif
/* Clear a kernel PTE and flush it from the TLB */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 181be52..3057193 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -126,8 +126,10 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */
+#define pte_offset_map_direct(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#define pte_unmap_direct(pte) do { } while (0)
#define update_mmu_cache(vma, address, ptep) do { } while (0)
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 738e659..5aeae53 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -77,13 +77,13 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
if (write)
mask |= _PAGE_RW;
- ptep = pte_offset_map(&pmd, addr);
+ ptep = pte_offset_map_direct(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
- pte_unmap(ptep);
+ pte_unmap_direct(ptep);
return 0;
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -93,7 +93,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
- pte_unmap(ptep - 1);
+ pte_unmap_direct(ptep - 1);
return 1;
}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 419d8f6..dcb1899 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -70,6 +70,11 @@ void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
return (void *)vaddr;
}
+void *__kmap_atomic_direct(struct page *page, enum km_type type)
+{
+ return __kmap_atomic_prot(page, type, kmap_prot);
+}
+
void *__kmap_atomic(struct page *page, enum km_type type)
{
return kmap_atomic_prot(page, type, kmap_prot);
@@ -105,6 +110,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
*/
void *__kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
+ preempt_disable();
return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(__kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
--
1.7.0.4