/*
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * based on arch/mips/mm/fault.c which is:
 *
 * Copyright (C) 1995-2000 Ralf Baechle
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/traps.h>

#define EXC_SUPERV_INSN_ACCESS	9  /* Supervisor only instruction address */
#define EXC_SUPERV_DATA_ACCESS	11 /* Supervisor only data address */
#define EXC_X_PROTECTION_FAULT	13 /* TLB permission violation (x) */
#define EXC_R_PROTECTION_FAULT	14 /* TLB permission violation (r) */
#define EXC_W_PROTECTION_FAULT	15 /* TLB permission violation (w) */

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

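	/*
	 * The exception cause arrives pre-shifted from the low-level
	 * handler; recover the plain cause number so it can be compared
	 * against the EXC_* values above.
	 */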
	cause >>= 2;

	/* Restart the instruction */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

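	/*
	 * Any other address at or above TASK_SIZE cannot belong to a
	 * user mapping, so there is no point searching the vma list.
	 */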
	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

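	/*
	 * A kernel-mode fault with no exception-table fixup entry is a
	 * kernel bug; report it right away instead of sleeping on
	 * mmap_sem.
	 */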
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
retry:
		down_read(&mm->mmap_sem);
	}

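	/*
	 * Look up the vma covering the faulting address. An address just
	 * below a VM_GROWSDOWN vma is treated as a stack access and the
	 * vma is expanded down to cover it.
	 */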
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	code = SEGV_ACCERR;

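	/*
	 * Check that the access implied by the exception cause is
	 * actually permitted by the vma. Supervisor-only address faults
	 * can never be satisfied from a user mapping.
	 */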
	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		/* don't clobber the retry/killable/user flags set above */
		flags |= FAULT_FLAG_WRITE;
		break;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

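	/*
	 * When VM_FAULT_RETRY comes back with a fatal signal pending,
	 * mmap_sem has already been dropped inside handle_mm_fault(),
	 * so simply return and let the signal be handled.
	 */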
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;


/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
				"cause %ld\n", current->comm, SIGSEGV, address,
				cause);
			show_regs(regs);
		}
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

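		/*
		 * The kernel page table entry exists now; drop any stale
		 * TLB entry for this address so the new mapping is used.
		 */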
		flush_tlb_one(address);
		return;
	}
}