| From ddce8c9e82fddb0f93df88c7005f1df7563115b4 Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Wed, 10 Mar 2010 23:07:50 +0100 |
| Subject: [PATCH] x86: Fix 32bit HIGHMEM=n compile |
| |
commit 70331516e2d98b6f114fa15c9a9590275f666447 in tip.

__kmap_atomic_prot_pfn() and __kunmap_atomic() lived in highmem_32.c,
which is only built when CONFIG_HIGHMEM=y, but iomap_32.c needs both
to implement iomap_atomic_prot_pfn() and iounmap_atomic(), so a 32bit
HIGHMEM=n build fails to link. Move the two functions into iomap_32.c
and, for the HIGHMEM=n case, map the generic kmap_atomic_prot_pfn()
and kunmap_atomic() names onto the local implementations. The atomic
iomap code relies on atomic fixmap mappings, which are not available
on PREEMPT_RT, so make HAVE_ATOMIC_IOMAP depend on !PREEMPT_RT and
compile the iomap_atomic helpers out there.

| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> |
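
For reference, a minimal sketch of how a caller uses the atomic iomap
API that is kept alive here. The function mmio_read_dword, its
parameters and the KM_USER0/PAGE_KERNEL_NOCACHE choices are
illustrative only, not part of this patch; the io_mapping helpers in
include/linux/io-mapping.h wrap this same path:

  #include <asm/iomap.h>      /* iomap_atomic_prot_pfn(), iounmap_atomic() */
  #include <asm/kmap_types.h> /* KM_USER0 */

  /* Read one 32bit word from MMIO page 'pfn', at 'offset' within it. */
  static u32 mmio_read_dword(unsigned long pfn, unsigned long offset)
  {
          void *vaddr;
          u32 val;

          /*
           * Map the pfn into a per-CPU fixmap slot; preemption and
           * pagefaults stay disabled until iounmap_atomic().
           */
          vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_NOCACHE);
          val = *(volatile u32 *)(vaddr + offset);
          iounmap_atomic(vaddr, KM_USER0);

          return val;
  }
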
| |
| diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
| index 2a4c2e9..657de5d 100644 |
| --- a/arch/x86/Kconfig |
| +++ b/arch/x86/Kconfig |
| @@ -2097,7 +2097,7 @@ endmenu |
| |
| config HAVE_ATOMIC_IOMAP |
| def_bool y |
| - depends on X86_32 |
| + depends on X86_32 && !PREEMPT_RT |
| |
| source "net/Kconfig" |
| |
| diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c |
| index 7f2ac8a..83b4efc 100644 |
| --- a/arch/x86/mm/highmem_32.c |
| +++ b/arch/x86/mm/highmem_32.c |
| @@ -65,52 +65,11 @@ void *__kmap_atomic_direct(struct page *page, enum km_type type) |
| return __kmap_atomic_prot(page, type, kmap_prot); |
| } |
| |
| -void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) |
| -{ |
| - enum fixed_addresses idx; |
| - unsigned long vaddr; |
| - |
| - preempt_disable(); |
| - pagefault_disable(); |
| - |
| - debug_kmap_atomic(type); |
| - idx = type + KM_TYPE_NR * smp_processor_id(); |
| - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
| - set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); |
| - arch_flush_lazy_mmu_mode(); |
| - |
| - return (void *)vaddr; |
| -} |
| - |
| void *__kmap_atomic(struct page *page, enum km_type type) |
| { |
| return kmap_atomic_prot(page, type, kmap_prot); |
| } |
| |
| -void __kunmap_atomic(void *kvaddr, enum km_type type) |
| -{ |
| - unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
| - enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); |
| - |
| - /* |
| - * Force other mappings to Oops if they'll try to access this pte |
| - * without first remap it. Keeping stale mappings around is a bad idea |
| - * also, in case the page changes cacheability attributes or becomes |
| - * a protected page in a hypervisor. |
| - */ |
| - if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) |
| - kpte_clear_flush(kmap_pte-idx, vaddr); |
| - else { |
| -#ifdef CONFIG_DEBUG_HIGHMEM |
| - BUG_ON(vaddr < PAGE_OFFSET); |
| - BUG_ON(vaddr >= (unsigned long)high_memory); |
| -#endif |
| - } |
| - |
| - pagefault_enable(); |
| - preempt_enable(); |
| -} |
| - |
| /* |
| * This is the same as kmap_atomic() but can map memory that doesn't |
| * have a struct page associated with it. |
| @@ -139,7 +98,6 @@ EXPORT_SYMBOL(kmap); |
| EXPORT_SYMBOL(kunmap); |
| EXPORT_SYMBOL(kunmap_virt); |
| EXPORT_SYMBOL(__kmap_atomic); |
| -EXPORT_SYMBOL(__kunmap_atomic); |
| EXPORT_SYMBOL(__kmap_atomic_prot); |
| EXPORT_SYMBOL(__kmap_atomic_to_page); |
| |
| diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c |
| index 38a1a68..c889bad 100644 |
| --- a/arch/x86/mm/iomap_32.c |
| +++ b/arch/x86/mm/iomap_32.c |
| @@ -55,6 +55,56 @@ iomap_free(resource_size_t base, unsigned long size) |
| } |
| EXPORT_SYMBOL_GPL(iomap_free); |
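+
+/*
+ * __kmap_atomic_prot_pfn() and __kunmap_atomic() are moved here from
+ * highmem_32.c, which is only built for CONFIG_HIGHMEM=y, so that the
+ * atomic iomap code below also links with HIGHMEM=n.
+ */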
| |
| +void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) |
| +{ |
| + enum fixed_addresses idx; |
| + unsigned long vaddr; |
| + |
| + preempt_disable(); |
| + pagefault_disable(); |
| + |
| + debug_kmap_atomic(type); |
| + idx = type + KM_TYPE_NR * smp_processor_id(); |
| + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
| + set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); |
| + arch_flush_lazy_mmu_mode(); |
| + |
| + return (void *)vaddr; |
| +} |
| + |
| +void __kunmap_atomic(void *kvaddr, enum km_type type) |
| +{ |
| + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
| + enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); |
| + |
| + /* |
| + * Force other mappings to Oops if they'll try to access this pte |
| + * without first remap it. Keeping stale mappings around is a bad idea |
| + * also, in case the page changes cacheability attributes or becomes |
| + * a protected page in a hypervisor. |
| + */ |
| + if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) |
| + kpte_clear_flush(kmap_pte-idx, vaddr); |
| + else { |
| +#ifdef CONFIG_DEBUG_HIGHMEM |
| + BUG_ON(vaddr < PAGE_OFFSET); |
| + BUG_ON(vaddr >= (unsigned long)high_memory); |
| +#endif |
| + } |
| + |
| + pagefault_enable(); |
| + preempt_enable(); |
| +} |
| +EXPORT_SYMBOL(__kunmap_atomic); |
| + |
| + |
| +#ifndef CONFIG_PREEMPT_RT |
| + |
| +# ifndef CONFIG_HIGHMEM |
| +# define kmap_atomic_prot_pfn(pfn, type, prot) \ |
| + __kmap_atomic_prot_pfn(pfn, type, prot) |
| +# define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type) |
| +# endif |
| /* |
| * Map 'pfn' using fixed map 'type' and protections 'prot' |
| */ |
| @@ -80,3 +130,4 @@ iounmap_atomic(void *kvaddr, enum km_type type) |
| kunmap_atomic(kvaddr, type); |
| } |
| EXPORT_SYMBOL_GPL(iounmap_atomic); |
| +#endif |
| -- |
| 1.7.0.4 |
| |