#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <linux/highmem.h>

#define pmd_populate_kernel(mm, pmd, pte) \
	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	/* Compute the pte page's physical address in 64 bits so it is
	   not truncated on PAE, where frames can live above 4GB. */
	set_pmd(pmd, __pmd(_PAGE_TABLE +
		((unsigned long long)(pte - mem_map) << PAGE_SHIFT)));
}
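
/*
 * Illustrative only, not part of this header: a minimal sketch of how
 * the generic mm layer is expected to pair the pte allocators below
 * with the populate helpers above (locking and error paths elided):
 *
 *	pte_t *new = pte_alloc_one_kernel(mm, address);
 *	if (new)
 *		pmd_populate_kernel(mm, pmd, new);
 *
 *	struct page *page = pte_alloc_one(mm, address);
 *	if (page)
 *		pmd_populate(mm, pmd, page);
 */
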
/*
 * Allocate and free page tables.
 */

#if defined(CONFIG_X86_PAE)
/*
 * We can't include <linux/slab.h> here, thus these uglinesses.
 */
struct kmem_cache_s;

extern struct kmem_cache_s *pae_pgd_cachep;
extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
extern void kmem_cache_free(struct kmem_cache_s *, void *);

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);

	if (pgd) {
		for (i = 0; i < USER_PTRS_PER_PGD; i++) {
			unsigned long pmd = __get_free_page(GFP_KERNEL);
			if (!pmd)
				goto out_oom;
			clear_page(pmd);
			/* PAE top-level entries must not carry the usual
			   permission bits, hence the bare present bit
			   ("1") rather than _PAGE_TABLE. */
			set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
		}
		/* The kernel half is shared with swapper_pg_dir. */
		memcpy(pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
out_oom:
	/* Strip the present bit from each entry installed so far to
	   recover and free its pmd page. */
	for (i--; i >= 0; i--)
		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pae_pgd_cachep, pgd);
	return NULL;
}

#else

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

#endif /* CONFIG_X86_PAE */

static inline void pgd_free(pgd_t *pgd)
{
#if defined(CONFIG_X86_PAE)
	int i;

	/* pgd_val() - 1 strips the present bit, see pgd_alloc() above. */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pae_pgd_cachep, pgd);
#else
	free_page((unsigned long)pgd);
#endif
}
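
/*
 * Illustrative only: pgd_alloc() and pgd_free() are expected to pair up
 * over an mm's lifetime, roughly as sketched here (hypothetical caller,
 * not the actual mm implementation):
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (!mm->pgd)
 *		return -ENOMEM;
 *	...
 *	pgd_free(mm->pgd);
 *	mm->pgd = NULL;
 */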

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	int count = 0;
	pte_t *pte;

	do {
		pte = (pte_t *) __get_free_page(GFP_KERNEL);
		if (pte)
			clear_page(pte);
		else {
			/* Out of memory: sleep for a second to let the
			   VM reclaim pages, then retry (at most ten
			   times) before giving up. */
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ);
		}
	} while (!pte && (count++ < 10));
	return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	int count = 0;
	struct page *pte;

	do {
#ifdef CONFIG_HIGHPTE
		pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
#else
		pte = alloc_pages(GFP_KERNEL, 0);
#endif
		if (pte)
			clear_highpage(pte);
		else {
			/* Out of memory: back off and retry, as above. */
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ);
		}
	} while (!pte && (count++ < 10));
	return pte;
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
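
/*
 * Illustrative only: with CONFIG_HIGHPTE the page returned by
 * pte_alloc_one() may live in highmem, so its entries cannot be
 * dereferenced directly; pte_offset_map()/pte_unmap() from
 * <asm/pgtable.h> wrap the required kmap_atomic() step, roughly:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	set_pte(pte, entry);
 *	pte_unmap(pte);
 */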

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 * (In the PAE case the pmds are freed as part of the pgd.)
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
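
/*
 * Illustrative only: on two-level (non-PAE) i386 the pmd is folded into
 * the pgd, so a page-table walk goes straight from the pgd entry to the
 * pte page; pmd_offset() merely retypes the pgd entry, and the stubs
 * above are never reached:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset_map(pmd, address);
 */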

/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct's TLB entries
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
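
/*
 * Illustrative only: after modifying a single user pte the expected
 * sequence is page-granular, roughly:
 *
 *	set_pte(ptep, entry);
 *	flush_tlb_page(vma, address);
 *
 * while larger updates pair with flush_tlb_range() or flush_tlb_mm().
 */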

#ifndef CONFIG_SMP

#define flush_tlb()		__flush_tlb()
#define flush_tlb_all()		__flush_tlb_all()
#define local_flush_tlb()	__flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb()	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	/* i386 has no ranged invlpg, so conservatively flush the
	   whole mm across all CPUs. */
	flush_tlb_mm(vma->vm_mm);
}

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
	char __cacheline_padding[24];	/* pad to 32 bytes so entries do
					   not share a cache line on CPUs
					   with 32-byte lines */
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
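
/*
 * Illustrative only: the SMP flush-IPI handler (arch code) is expected
 * to consult cpu_tlbstate roughly like this, skipping CPUs that hold
 * the mm only lazily:
 *
 *	if (mm == cpu_tlbstate[cpu].active_mm) {
 *		if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 *			local_flush_tlb();
 *	}
 */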

#endif /* CONFIG_SMP */

static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* i386 does not keep any page table caches in TLB */
}

#define check_pgt_cache()	do { } while (0)

#endif /* _I386_PGALLOC_H */