| /* |
| * This file is subject to the terms and conditions of the GNU General Public |
| * License. See the file "COPYING" in the main directory of this archive |
| * for more details. |
| * |
| * Copyright (C) 1994 - 2001 by Ralf Baechle |
| * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
| */ |
| #ifndef _ASM_PGALLOC_H |
| #define _ASM_PGALLOC_H |
| |
| #include <linux/config.h> |
| #include <linux/mm.h> |
| #include <asm/fixmap.h> |
| |
/*
 * TLB flushing:
 *
 * - flush_tlb_all() flushes all processes' TLB entries
 * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 * - flush_tlb_page(vma, vmaddr) flushes a single page
 * - flush_tlb_range(mm, start, end) flushes a range of pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 * - flush_tlb_one(page) flushes a single kernel page
 */
| extern void local_flush_tlb_all(void); |
| extern void local_flush_tlb_mm(struct mm_struct *mm); |
| extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start, |
| unsigned long end); |
| extern void local_flush_tlb_page(struct vm_area_struct *vma, |
| unsigned long page); |
| extern void local_flush_tlb_one(unsigned long page); |
| |
| #ifdef CONFIG_SMP |
| |
| extern void flush_tlb_all(void); |
| extern void flush_tlb_mm(struct mm_struct *); |
| extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long); |
| extern void flush_tlb_page(struct vm_area_struct *, unsigned long); |
| |
| #else /* CONFIG_SMP */ |
| |
| #define flush_tlb_all() local_flush_tlb_all() |
| #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) |
| #define flush_tlb_range(mm,vmaddr,end) local_flush_tlb_range(mm, vmaddr, end) |
| #define flush_tlb_page(vma,page) local_flush_tlb_page(vma, page) |
| |
| #endif /* CONFIG_SMP */ |
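
/*
 * Illustrative sketch only, compiled out: after a pte has been changed,
 * the affected address must be flushed so the TLB cannot keep serving
 * the stale translation.  The helper name below is hypothetical and not
 * part of this interface.
 */
#if 0
static inline void example_update_pte(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep, pte_t entry)
{
	set_pte(ptep, entry);		/* install the new translation */
	flush_tlb_page(vma, address);	/* invalidate the stale TLB entry */
}
#endif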
| |
static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/*
	 * Nothing to do on MIPS: the TLB caches only final translations,
	 * and the software refill handler rebuilds those by walking the
	 * page tables, so freed page-table pages need no separate flush.
	 */
}
| |
| |
| /* |
| * Allocate and free page tables. |
| */ |
| |
| #define pgd_quicklist (current_cpu_data.pgd_quick) |
| #define pmd_quicklist ((unsigned long *)0) |
| #define pte_quicklist (current_cpu_data.pte_quick) |
| #define pgtable_cache_size (current_cpu_data.pgtable_cache_sz) |
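
/*
 * The quicklists are LIFO free lists threaded through the free pages
 * themselves: word 0 of each free page points at the next free page.
 * A minimal sketch of the pop operation used by the get_*_fast()
 * helpers below (illustrative only, compiled out):
 */
#if 0
static inline unsigned long *example_quicklist_pop(unsigned long **list)
{
	unsigned long *page = *list;

	if (page) {
		*list = (unsigned long *)*page;	/* unlink the head page */
		page[0] = page[1];		/* word 0 held the link; restore it
						   from identically initialized word 1 */
		pgtable_cache_size--;
	}
	return page;
}
#endif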
| |
| #define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte) |
| |
| /* |
| * Initialize new page directory with pointers to invalid ptes |
| */ |
| extern void pgd_init(unsigned long page); |
| |
static __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		/* Point the user entries at invalid ptes ... */
		pgd_init((unsigned long)ret);
		/* ... and share the kernel mappings with init_mm. */
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}
| |
static __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		/*
		 * Word 0 carried the freelist link; restore it from the
		 * identically initialized word 1.
		 */
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}
| |
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
	/* Push onto the quicklist; word 0 of the page becomes the link. */
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}
| |
| static __inline__ void free_pgd_slow(pgd_t *pgd) |
| { |
| free_pages((unsigned long)pgd, PGD_ORDER); |
| } |
| |
| static __inline__ pte_t *get_pte_fast(void) |
| { |
| unsigned long *ret; |
| |
	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
| pte_quicklist = (unsigned long *)(*ret); |
| ret[0] = ret[1]; |
| pgtable_cache_size--; |
| } |
| return (pte_t *)ret; |
| } |
| |
| static __inline__ void free_pte_fast(pte_t *pte) |
| { |
| *(unsigned long *)pte = (unsigned long) pte_quicklist; |
| pte_quicklist = (unsigned long *) pte; |
| pgtable_cache_size++; |
| } |
| |
| static __inline__ void free_pte_slow(pte_t *pte) |
| { |
| free_page((unsigned long)pte); |
| } |
| |
/* We don't use a pmd cache, so these are dummy routines. */
| static __inline__ pmd_t *get_pmd_fast(void) |
| { |
| return (pmd_t *)0; |
| } |
| |
| static __inline__ void free_pmd_fast(pmd_t *pmd) |
| { |
| } |
| |
| static __inline__ void free_pmd_slow(pmd_t *pmd) |
| { |
| } |
| |
| extern void __bad_pte(pmd_t *pmd); |
| |
| static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address) |
| { |
| pte_t *pte; |
| |
| pte = (pte_t *) __get_free_page(GFP_KERNEL); |
	if (pte)
		clear_page(pte);	/* every entry must start out as pte_none */
| return pte; |
| } |
| |
| static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) |
| { |
| unsigned long *ret; |
| |
| if ((ret = (unsigned long *)pte_quicklist) != NULL) { |
| pte_quicklist = (unsigned long *)(*ret); |
| ret[0] = ret[1]; |
| pgtable_cache_size--; |
| } |
| return (pte_t *)ret; |
| } |
| |
| static __inline__ void pte_free_fast(pte_t *pte) |
| { |
| *(unsigned long *)pte = (unsigned long) pte_quicklist; |
| pte_quicklist = (unsigned long *) pte; |
| pgtable_cache_size++; |
| } |
| |
| static __inline__ void pte_free_slow(pte_t *pte) |
| { |
| free_page((unsigned long)pte); |
| } |
| |
| #define pte_free(pte) pte_free_fast(pte) |
| #define pgd_free(pgd) free_pgd_fast(pgd) |
| #define pgd_alloc(mm) get_pgd_fast() |
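
/*
 * Illustrative sketch only, compiled out: a page directory's lifecycle
 * through the wrappers above.  The helper name is hypothetical.
 */
#if 0
static void example_pgd_lifecycle(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_alloc(mm);	/* quicklist hit, or get_pgd_slow() */

	if (pgd)
		pgd_free(pgd);		/* back onto the quicklist */
}
#endif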
| |
| /* |
| * allocating and freeing a pmd is trivial: the 1-entry pmd is |
| * inside the pgd, so has no extra memory associated with it. |
| */ |
| #define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); }) |
| #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) |
| #define pmd_free(x) do { } while (0) |
| #define pgd_populate(mm, pmd, pte) BUG() |
| |
/* Trim the quicklists back between the given low and high water marks. */
extern int do_check_pgt_cache(int, int);
| |
| #endif /* _ASM_PGALLOC_H */ |