| /* |
| * Meta exception handling. |
| * |
| * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd. |
| * |
| * This file is subject to the terms and conditions of the GNU General Public |
| * License. See the file COPYING in the main directory of this archive |
| * for more details. |
| */ |
| |
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>
| |
| /* Passing syscall arguments as long long is quicker. */ |
| typedef unsigned int (*LPSYSCALL) (unsigned long long, |
| unsigned long long, |
| unsigned long long); |
| |
| /* |
| * Users of LNKSET should compare the bus error bits obtained from DEFR |
| * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between |
| * different cores revisions. |
| */ |
| #define TXDEFR_LNKSET_SUCCESS 0x02000000 |
| #define TXDEFR_LNKSET_FAILURE 0x04000000 |
| |
| /* |
| * Our global TBI handle. Initialised from setup.c/setup_arch. |
| */ |
| DECLARE_PER_CPU(PTBI, pTBI); |
| |
| #ifdef CONFIG_SMP |
| static DEFINE_PER_CPU(unsigned int, trigger_mask); |
| #else |
| unsigned int global_trigger_mask; |
| EXPORT_SYMBOL(global_trigger_mask); |
| #endif |
| |
| unsigned long per_cpu__stack_save[NR_CPUS]; |
| |
| #ifdef CONFIG_METAG_ROM_WRAPPERS |
| /* |
| * ROM vector patch table. By default it points to our internal functions, |
| * but these can be overridden if a ROM patch is found to be present. |
| * NB - the TBI_VEC_RESUME vector is normally NULL, as it is implicitly |
| * called within our internal code, but it is filled out during a ROM patch |
| * installation, as we then require its address. This fact can be used to |
| * determine if a patch is installed or not. |
| */ |
| tbi_ptr tbi_vectors[] = { |
| (tbi_ptr) __TBIASyncTrigger, |
| (tbi_ptr) __TBIASyncResume, |
| (tbi_ptr) 0 /* Place holder for TBIResume in ROM */ |
| }; |
| |
| TBIRES fault_wrapper(TBIRES State, int SigNum, int Triggers, int Inst, |
| PTBI pTBI); |
| TBIRES switch1_wrapper(TBIRES State, int SigNum, int Triggers, int Inst, |
| PTBI pTBI); |
| TBIRES switchx_wrapper(TBIRES State, int SigNum, int Triggers, int Inst, |
| PTBI pTBI); |
| TBIRES trigger_wrapper(TBIRES State, int SigNum, int Triggers, int Inst, |
| PTBI pTBI); |
| TBIRES kick_wrapper(TBIRES State, int SigNum, int Triggers, int Inst, |
| PTBI pTBI); |
| #endif |
| |
| static const char * const trap_names[] = { |
| [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault", |
| [TBIXXF_SIGNUM_PGF] = "Privilege violation", |
| [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault", |
| [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure", |
| [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault", |
| [TBIXXF_SIGNUM_IPF] = "Code fetch page fault", |
| [TBIXXF_SIGNUM_DPF] = "Data access page fault", |
| [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint", |
| [TBIXXF_SIGNUM_DWF] = "Read-only data access fault", |
| }; |
| |
| const char *trap_name(int trapno) |
| { |
| if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names) |
| && trap_names[trapno]) |
| return trap_names[trapno]; |
| return "Unknown fault"; |
| } |
| |
| static DEFINE_SPINLOCK(die_lock); |
| |
| void __noreturn die(const char *str, struct pt_regs *regs, |
| long err, unsigned long addr) |
| { |
| static int die_counter; |
| |
| oops_enter(); |
| |
| spin_lock_irq(&die_lock); |
| console_verbose(); |
| bust_spinlocks(1); |
| pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff, |
| trap_name(err & 0xffff), addr, ++die_counter); |
| |
| print_modules(); |
| show_regs(regs); |
| |
| pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm, |
| task_pid_nr(current), task_stack_page(current) + THREAD_SIZE); |
| |
| bust_spinlocks(0); |
| add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); |
| if (kexec_should_crash(current)) |
| crash_kexec(regs); |
| |
| if (in_interrupt()) |
| panic("Fatal exception in interrupt"); |
| |
| if (panic_on_oops) |
| panic("Fatal exception"); |
| |
| spin_unlock_irq(&die_lock); |
| oops_exit(); |
| do_exit(SIGSEGV); |
| } |
| |
| #ifdef CONFIG_SOC_CHORUS2 |
| static void replay_catchbuffer(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs) |
| { |
| int reg = 0; |
| int unit = 0; |
| int mask = 0; |
| int raxxx = 0; |
| int load_size = 0; |
| int pp = 0; |
| int die = 0; |
| int datal = 0; |
| int datah = 0; |
| |
| current_thread_info()->replay_regs = regs; |
| |
| /********* READS or LOADS *************/ |
| if (pcbuf->CBFlags & TXCATCH0_READ_BIT) { |
| reg = |
| (pcbuf->CBFlags & TXCATCH0_LDRXX_BITS) >> TXCATCH0_LDRXX_S; |
| unit = |
| (pcbuf->CBFlags & TXCATCH0_LDDST_BITS) >> TXCATCH0_LDDST_S; |
| |
| if (unit) { |
| mask = 0; |
| load_size = |
| (pcbuf-> |
| CBFlags & TXCATCH0_LDL2L1_BITS) >> |
| TXCATCH0_LDL2L1_S; |
| pp = (pcbuf->CBFlags & TXCATCH0_LDM16_BIT) != 0; |
| } else { |
| raxxx = |
| (pcbuf-> |
| CBFlags & TXCATCH0_RAXX_BITS) >> TXCATCH0_RAXX_S; |
| pr_debug(" Don't yet do RD READs: raxxx %#x\n", raxxx); |
| die = 1; |
| goto out; |
| } |
| |
| switch (load_size) { |
| case 0: /* 8 bit */ |
| datal = *(char *)(pcbuf->CBAddr); |
| break; |
| |
| case 1: /* 16 bit */ |
| datal = *(short *)(pcbuf->CBAddr); |
| break; |
| |
| case 2: /* 32 bit */ |
| datal = *(int *)(pcbuf->CBAddr); |
| break; |
| |
| case 3: /* 64 bit */ |
| datal = *(int *)(pcbuf->CBAddr); |
| datah = *(int *)(pcbuf->CBAddr + 4); |
| |
| /* pp bit means swap the units we are writing to! |
| * So, if we swap here, we don't have to swap later on. |
| */ |
| if (pp) { |
| int tmp = datal; |
| datal = datah; |
| datah = tmp; |
| } |
| break; |
| |
| default: |
| pr_debug(" unknown read load_size %d\n", load_size); |
| die = 1; |
| goto out; |
| break; |
| } |
| |
| switch (unit) { |
| case 0: |
| pr_debug(" Do not do RD Reads yet\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| case TXCATCH0_LDDST_D1DSP: |
| pr_debug(" Do not handle D1DSP\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| case TXCATCH0_LDDST_D0DSP: |
| pr_debug(" Do not handle D0DSP\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| case TXCATCH0_LDDST_TMPLT: |
| pr_debug(" Do not handle TMPLT\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| case TXCATCH0_LDDST_TR: |
| pr_debug(" Do not handle TR\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| case TXCATCH0_LDDST_PC: |
| pr_debug(" Do not handle PC\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| case TXCATCH0_LDDST_A1: |
| /* FIXME I think this can go away if we can be |
| * sure that 64 bit loads set both units in |
| * the LDDST field. |
| */ |
| if (load_size == 3) { |
| pr_warn("single dest unit (a1) but 64 bit size, pc %#x\n", |
| regs->ctx.CurrPC); |
| /* 64bit - both units */ |
| regs->ctx.AX[reg].U1 = datal; |
| regs->ctx.AX[reg].U0 = datah; |
| } else { |
| regs->ctx.AX[reg].U1 = datal; |
| } |
| break; |
| |
| case TXCATCH0_LDDST_A0: |
| /* FIXME I think this can go away if we can be |
| * sure that 64 bit loads set both units in |
| * the LDDST field. |
| */ |
| if (load_size == 3) { |
| pr_warn("single dest unit (a0) but 64 bit size, pc %#x\n", |
| regs->ctx.CurrPC); |
| /* 64bit - both units */ |
| regs->ctx.AX[reg].U0 = datal; |
| regs->ctx.AX[reg].U1 = datah; |
| } else { |
| regs->ctx.AX[reg].U0 = datal; |
| } |
| break; |
| |
| case TXCATCH0_LDDST_D1: |
| /* FIXME - XXX - we can sanity check that we have |
| * not over-run the context both here and in other |
| * cases !!! - eg. movs to/from globregs in user |
| * space. |
| * Graham |
| */ |
| /* FIXME I think this can go away if we can be |
| * sure that 64 bit loads set both units in |
| * the LDDST field. |
| */ |
| if (load_size == 3) { |
| pr_warn("single dest unit (d1) but 64 bit size, pc %#x\n", |
| regs->ctx.CurrPC); |
| /* 64bit - both units */ |
| regs->ctx.DX[reg].U1 = datal; |
| regs->ctx.DX[reg].U0 = datah; |
| } else { |
| regs->ctx.DX[reg].U1 = datal; |
| } |
| break; |
| |
| case TXCATCH0_LDDST_D0: |
| /* FIXME I think this can go away if we can be |
| * sure that 64 bit loads set both units in |
| * the LDDST field. |
| */ |
| if (load_size == 3) { |
| pr_warn("single dest unit (d0) but 64 bit size, pc %#x\n", |
| regs->ctx.CurrPC); |
| /* 64bit - both units */ |
| regs->ctx.DX[reg].U0 = datal; |
| regs->ctx.DX[reg].U1 = datah; |
| } else { |
| regs->ctx.DX[reg].U0 = datal; |
| } |
| break; |
| |
| /* 64bit load into a pair of units */ |
| case TXCATCH0_LDDST_D0 | TXCATCH0_LDDST_D1: |
| if (load_size != 3) { |
| pr_warn("Dual data unit read with non-64bit value?\n"); |
| die = 1; |
| goto out; |
| } |
| |
| regs->ctx.DX[reg].U0 = datal; |
| regs->ctx.DX[reg].U1 = datah; |
| break; |
| |
| case TXCATCH0_LDDST_A0 | TXCATCH0_LDDST_A1: |
| if (load_size != 3) { |
| pr_warn("Dual addr unit read with non-64bit value?\n"); |
| die = 1; |
| goto out; |
| } |
| |
| regs->ctx.AX[reg].U0 = datal; |
| regs->ctx.AX[reg].U1 = datah; |
| break; |
| |
| case TXCATCH0_LDDST_CT: |
| pr_debug(" Do not handle CT reads\n"); |
| die = 1; |
| goto out; |
| break; |
| |
| default: |
| pr_debug("Unhandled unit %d\n", unit); |
| die = 1; |
| goto out; |
| break; |
| } |
| } else |
| /********************* WRITES **************/ |
| { |
| int base_addr = pcbuf->CBAddr & ~0x7; |
| |
| mask = |
| (pcbuf->CBFlags & TXCATCH0_WMASK_BITS) >> TXCATCH0_WMASK_S; |
| |
| /* The mask is an active low byte lane mask. Use it to |
| * figure out how large a transfer we want, and also to |
| * which address! |
| */ |
| switch (mask) { |
| case 0: |
| /* Full 64bit write */ |
| *(int *)(base_addr) = pcbuf->CBData.U0; |
| *(int *)(base_addr + 4) = pcbuf->CBData.U1; |
| break; |
| |
| case 0xF0: /* Bottom 32bits */ |
| *(int *)(base_addr) = pcbuf->CBData.U0; |
| break; |
| |
| case 0x0F: /* Top 32bits */ |
| *(int *)(base_addr + 4) = pcbuf->CBData.U1; |
| break; |
| |
| case 0xFC: /* Bottom 16bits */ |
| *(short *)(base_addr) = pcbuf->CBData.U0; |
| break; |
| |
| case 0xF3: /* Second 16bits */ |
| *(short *)(base_addr + 2) = pcbuf->CBData.U0 >> 16; |
| break; |
| |
| case 0xCF: /* Third 16bits */ |
| *(short *)(base_addr + 4) = pcbuf->CBData.U1; |
| break; |
| |
| case 0x3F: /* Top 16bits */ |
| *(short *)(base_addr + 6) = pcbuf->CBData.U1 >> 16; |
| break; |
| |
| case 0xFE: /* Bottom byte */ |
| *(char *)(base_addr) = pcbuf->CBData.U0; |
| break; |
| |
| case 0xFD: /* 2nd byte */ |
| *(char *)(base_addr + 1) = pcbuf->CBData.U0 >> 8; |
| break; |
| |
| case 0xFB: /* 3rd byte */ |
| *(char *)(base_addr + 2) = pcbuf->CBData.U0 >> 16; |
| break; |
| |
| case 0xF7: /* 4th byte */ |
| *(char *)(base_addr + 3) = pcbuf->CBData.U0 >> 24; |
| break; |
| |
| case 0xEF: /* 5th byte */ |
| *(char *)(base_addr + 4) = pcbuf->CBData.U1; |
| break; |
| |
| case 0xDF: /* 6th byte */ |
| *(char *)(base_addr + 5) = pcbuf->CBData.U1 >> 8; |
| break; |
| |
| case 0xBF: /* 7th byte */ |
| *(char *)(base_addr + 6) = pcbuf->CBData.U1 >> 16; |
| break; |
| |
| case 0x7F: /* Top byte */ |
| *(char *)(base_addr + 7) = pcbuf->CBData.U1 >> 24; |
| break; |
| |
| default: |
| pr_debug("Unknown catch write mask %#x\n", mask); |
| die = 1; |
| goto out; |
| break; |
| } |
| } |
| |
| out: |
| if (die) { |
| pr_debug("Failed to soft replay catch ...\n"); |
| |
| pr_debug(" pid %d, PC %#x\n", current->pid, regs->ctx.CurrPC); |
| |
| pr_debug(" CBFlags %#x\n", (int)(pcbuf->CBFlags)); |
| pr_debug(" CBAddr %#x\n", (int)(pcbuf->CBAddr)); |
| pr_debug(" CBData.U0 %#x\n", pcbuf->CBData.U0); |
| pr_debug(" CBData.U1 %#x\n", pcbuf->CBData.U1); |
| |
| if (pcbuf->CBFlags & TXCATCH0_READ_BIT) { |
| pr_debug(" Read\n"); |
| |
| pr_debug(" unit %d, reg %d\n", unit, reg); |
| } else { |
| pr_debug(" Write\n"); |
| } |
| |
| show_regs(regs); |
| /* Probably should signal user process here! */ |
| hard_processor_halt(HALT_PANIC); |
| } else { |
| /* As we have just done the catch buffer action by hand, |
| * we must now clear out the stored catch buffer state, |
| * so the hardware does not try to replay it upon resume. |
| */ |
| |
| /* What do we do if there is data in the RD pipe ? */ |
| if (regs->ctx.SaveMask & TBICTX_CBRP_BIT) { |
| pr_debug("Cannot fix up the RD pipe yet\n"); |
| die = 1; |
| goto out; |
| } |
| |
| regs->ctx.SaveMask &= ~(TBICTX_XCBF_BIT | TBICTX_CBUF_BIT); |
| pcbuf->CBFlags = 0; |
| pcbuf->CBAddr = 0; |
| pcbuf->CBData.U0 = 0; |
| pcbuf->CBData.U1 = 0; |
| } |
| |
| return; |
| } |
| #endif |
| |
| #ifdef CONFIG_METAG_DSP |
| /* |
| * The ECH encoding specifies the size of a DSPRAM as, |
| * |
| * "slots" / 4 |
| * |
| * A "slot" is the size of two DSPRAM bank entries; an entry from |
| * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank |
| * entry is 4 bytes. |
| */ |
| #define SLOT_SZ 8 |
| static inline unsigned int decode_dspram_size(unsigned int size) |
| { |
| unsigned int _sz = size & 0x7f; |
| |
| return _sz * SLOT_SZ * 4; |
| } |
| |
| static void dspram_save(struct meta_ext_context *dsp_ctx, |
| unsigned int ramA_sz, unsigned int ramB_sz) |
| { |
| unsigned int ram_sz[2]; |
| int i; |
| |
| ram_sz[0] = ramA_sz; |
| ram_sz[1] = ramB_sz; |
| |
| for (i = 0; i < 2; i++) { |
| if (ram_sz[i] != 0) { |
| unsigned int sz; |
| |
| if (i == 0) |
| sz = decode_dspram_size(ram_sz[i] >> 8); |
| else |
| sz = decode_dspram_size(ram_sz[i]); |
| |
| if (dsp_ctx->ram[i] == NULL) { |
| dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL); |
| |
| if (dsp_ctx->ram[i] == NULL) |
| panic("couldn't save DSP context"); |
| } else { |
| if (ram_sz[i] > dsp_ctx->ram_sz[i]) { |
| kfree(dsp_ctx->ram[i]); |
| |
| dsp_ctx->ram[i] = kmalloc(sz, |
| GFP_KERNEL); |
| |
| if (dsp_ctx->ram[i] == NULL) |
| panic("couldn't save DSP context"); |
| } |
| } |
| |
| if (i == 0) |
| __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]); |
| else |
| __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]); |
| |
| dsp_ctx->ram_sz[i] = ram_sz[i]; |
| } |
| } |
| } |
| #endif /* CONFIG_METAG_DSP */ |
| |
| /* |
| * Allow interrupts to be nested and save any "extended" register |
| * context state, e.g. DSP regs and RAMs. |
| */ |
| static void nest_interrupts(TBIRES State, unsigned long mask) |
| { |
| #ifdef CONFIG_METAG_DSP |
| struct meta_ext_context *dsp_ctx; |
| unsigned int D0_8; |
| |
| /* |
| * D0.8 may contain an ECH encoding. The upper 16 bits |
| * tell us what DSP resources the current process is |
| * using. OR the bits into the SaveMask so that |
| * __TBINestInts() knows what resources to save as |
| * part of this context. |
| * |
| * Don't save the context if we're nesting interrupts in the |
| * kernel because the kernel doesn't use DSP hardware. |
| */ |
| D0_8 = __core_reg_get(D0.8); |
| |
| if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) { |
| State.Sig.SaveMask |= (D0_8 >> 16); |
| |
| dsp_ctx = current->thread.dsp_context; |
| if (dsp_ctx == NULL) { |
| dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL); |
| if (dsp_ctx == NULL) |
| panic("couldn't save DSP context: ENOMEM"); |
| |
| current->thread.dsp_context = dsp_ctx; |
| } |
| |
| current->thread.user_flags |= (D0_8 & 0xffff0000); |
| __TBINestInts(State, &dsp_ctx->regs, mask); |
| dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f); |
| } else |
| __TBINestInts(State, NULL, mask); |
| #else |
| __TBINestInts(State, NULL, mask); |
| #endif |
| } |
| |
| void head_end(TBIRES State, unsigned long mask) |
| { |
| unsigned int savemask = (unsigned short)State.Sig.SaveMask; |
| unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask; |
| |
| if (savemask & TBICTX_PRIV_BIT) { |
| ctx_savemask |= TBICTX_PRIV_BIT; |
| current->thread.user_flags = savemask; |
| } |
| |
| /* Always undo the sleep bit */ |
| ctx_savemask &= ~TBICTX_WAIT_BIT; |
| |
| /* Always save the catch buffer and RD pipe if they are dirty */ |
| savemask |= TBICTX_XCBF_BIT; |
| |
| /* Only save the catch and RD if we have not already done so. |
| * Note - the RD bits are in the pCtx only, and not in the |
| * State.SaveMask. |
| */ |
| if ((savemask & TBICTX_CBUF_BIT) || |
| (ctx_savemask & TBICTX_CBRP_BIT)) { |
| /* Have we already saved the buffers though? |
| * - See TestTrack 5071 */ |
| if (ctx_savemask & TBICTX_XCBF_BIT) { |
| /* Strip off the bits so the call to __TBINestInts |
| * won't save the buffers again. */ |
| savemask &= ~TBICTX_CBUF_BIT; |
| ctx_savemask &= ~TBICTX_CBRP_BIT; |
| } |
| } |
| |
| #ifdef CONFIG_METAG_META21 |
| { |
| unsigned int depth, txdefr; |
| |
| /* |
| * Save TXDEFR state. |
| * |
| * The process may have been interrupted after a LNKSET, but |
| * before it could read the DEFR state, so we mustn't lose that |
| * state or it could end up retrying an atomic operation that |
| * succeeded. |
| * |
| * All interrupts are disabled at this point so we |
| * don't need to perform any locking. We must do this |
| * dance before we use LNKGET or LNKSET. |
| */ |
| BUG_ON(current->thread.int_depth > HARDIRQ_BITS); |
| |
| depth = current->thread.int_depth++; |
| |
| txdefr = __core_reg_get(TXDEFR); |
| |
| txdefr &= TXDEFR_BUS_STATE_BITS; |
| if (txdefr & TXDEFR_LNKSET_SUCCESS) |
| current->thread.txdefr_failure &= ~(1 << depth); |
| else |
| current->thread.txdefr_failure |= (1 << depth); |
| } |
| #endif |
| |
| State.Sig.SaveMask = savemask; |
| State.Sig.pCtx->SaveMask = ctx_savemask; |
| |
| nest_interrupts(State, mask); |
| |
| #ifdef CONFIG_METAG_POISON_CATCH_BUFFERS |
| /* Poison the catch registers. This shows up any mistakes we have |
| * made in their handling MUCH quicker. |
| */ |
| __core_reg_set(TXCATCH0, 0x87650021); |
| __core_reg_set(TXCATCH1, 0x87654322); |
| __core_reg_set(TXCATCH2, 0x87654323); |
| __core_reg_set(TXCATCH3, 0x87654324); |
| #endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */ |
| } |
| |
| TBIRES tail_end_sys(TBIRES State, int syscall, int *restart) |
| { |
| struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; |
| unsigned long flags; |
| |
| local_irq_disable(); |
| |
| if (user_mode(regs)) { |
| flags = current_thread_info()->flags; |
| if (flags & _TIF_WORK_MASK && |
| do_work_pending(regs, flags, syscall)) { |
| *restart = 1; |
| return State; |
| } |
| |
| #ifdef CONFIG_METAG_FPU |
| if (current->thread.fpu_context && |
| current->thread.fpu_context->needs_restore) { |
| __TBICtxFPURestore(State, current->thread.fpu_context); |
| /* |
| * Clearing this bit ensures the FP unit is not made |
| * active again unless it is used. |
| */ |
| State.Sig.SaveMask &= ~TBICTX_FPAC_BIT; |
| current->thread.fpu_context->needs_restore = false; |
| } |
| State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR); |
| #endif |
| } |
| |
| #ifdef CONFIG_SOC_CHORUS2 |
| if (State.Sig.pCtx->SaveMask & (TBICTX_CBUF_BIT | TBICTX_XCBF_BIT)) { |
| PTBICTXEXTCB0 cbuf = regs->extcb0; |
| |
| if (cbuf->CBFlags | cbuf->CBAddr | |
| cbuf->CBData.U0 | cbuf->CBData.U1) |
| replay_catchbuffer(cbuf, regs); |
| } |
| #endif |
| |
| /* TBI will turn interrupts back on at some point. */ |
| if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask)) |
| trace_hardirqs_on(); |
| |
| #ifdef CONFIG_METAG_DSP |
| /* |
| * If we previously saved an extended context then restore it |
| * now. Otherwise, clear D0.8 because this process is not |
| * using DSP hardware. |
| */ |
| if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) { |
| unsigned int D0_8; |
| struct meta_ext_context *dsp_ctx = current->thread.dsp_context; |
| |
| /* Make sure we're going to return to userland. */ |
| BUG_ON(current->thread.int_depth != 1); |
| |
| if (dsp_ctx->ram_sz[0] > 0) |
| __TBIDspramRestoreA(dsp_ctx->ram_sz[0], |
| dsp_ctx->ram[0]); |
| if (dsp_ctx->ram_sz[1] > 0) |
| __TBIDspramRestoreB(dsp_ctx->ram_sz[1], |
| dsp_ctx->ram[1]); |
| |
| State.Sig.SaveMask |= State.Sig.pCtx->SaveMask; |
| __TBICtxRestore(State, current->thread.dsp_context); |
| D0_8 = __core_reg_get(D0.8); |
| D0_8 |= current->thread.user_flags & 0xffff0000; |
| D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff; |
| __core_reg_set(D0.8, D0_8); |
| } else |
| __core_reg_set(D0.8, 0); |
| #endif /* CONFIG_METAG_DSP */ |
| |
| #ifdef CONFIG_METAG_META21 |
| { |
| unsigned int depth, txdefr; |
| |
| /* |
| * If there hasn't been a LNKSET since the last LNKGET then the |
| * link flag will be set, causing the next LNKSET to succeed if |
| * the addresses match. The two LNK operations may not be a pair |
| * (e.g. see atomic_read()), so the LNKSET should fail. |
| * We use a conditional-never LNKSET to clear the link flag |
| * without side effects. |
| */ |
| asm volatile("LNKSETDNV [D0Re0],D0Re0"); |
| |
| depth = --current->thread.int_depth; |
| |
| BUG_ON(user_mode(regs) && depth); |
| |
| txdefr = __core_reg_get(TXDEFR); |
| |
| txdefr &= ~TXDEFR_BUS_STATE_BITS; |
| |
| /* Do we need to restore a failure code into TXDEFR? */ |
| if (current->thread.txdefr_failure & (1 << depth)) |
| txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT); |
| else |
| txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT); |
| |
| __core_reg_set(TXDEFR, txdefr); |
| } |
| #endif |
| return State; |
| } |
| |
| #ifdef CONFIG_SMP |
| /* |
| * If we took an interrupt in the middle of __kuser_get_tls then we need |
| * to rewind the PC to the start of the function in case the process |
| * gets migrated to another thread (SMP only) and it reads the wrong tls |
| * data. |
| */ |
| static inline void _restart_critical_section(TBIRES State) |
| { |
| unsigned long get_tls_start; |
| unsigned long get_tls_end; |
| |
| get_tls_start = (unsigned long)__kuser_get_tls - |
| (unsigned long)&__user_gateway_start; |
| |
| get_tls_start += USER_GATEWAY_PAGE; |
| |
| get_tls_end = (unsigned long)__kuser_get_tls_end - |
| (unsigned long)&__user_gateway_start; |
| |
| get_tls_end += USER_GATEWAY_PAGE; |
| |
| if ((State.Sig.pCtx->CurrPC >= get_tls_start) && |
| (State.Sig.pCtx->CurrPC < get_tls_end)) |
| State.Sig.pCtx->CurrPC = get_tls_start; |
| } |
| #else |
| /* |
| * If we took an interrupt in the middle of |
| * __kuser_cmpxchg then we need to rewind the PC to the |
| * start of the function. |
| */ |
| static inline void _restart_critical_section(TBIRES State) |
| { |
| unsigned long cmpxchg_start; |
| unsigned long cmpxchg_end; |
| |
| cmpxchg_start = (unsigned long)__kuser_cmpxchg - |
| (unsigned long)&__user_gateway_start; |
| |
| cmpxchg_start += USER_GATEWAY_PAGE; |
| |
| cmpxchg_end = (unsigned long)__kuser_cmpxchg_end - |
| (unsigned long)&__user_gateway_start; |
| |
| cmpxchg_end += USER_GATEWAY_PAGE; |
| |
| if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) && |
| (State.Sig.pCtx->CurrPC < cmpxchg_end)) |
| State.Sig.pCtx->CurrPC = cmpxchg_start; |
| } |
| #endif |
| |
| /* Used by kick_handler() */ |
| void restart_critical_section(TBIRES State) |
| { |
| _restart_critical_section(State); |
| } |
| |
| TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst, |
| PTBI pTBI) |
| { |
| head_end(State, ~INTS_OFF_MASK); |
| |
| /* If we interrupted user code handle any critical sections. */ |
| if (State.Sig.SaveMask & TBICTX_PRIV_BIT) |
| _restart_critical_section(State); |
| |
| trace_hardirqs_off(); |
| |
| do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx); |
| |
| return tail_end(State); |
| } |
| |
| static unsigned int load_fault(PTBICTXEXTCB0 pbuf) |
| { |
| return pbuf->CBFlags & TXCATCH0_READ_BIT; |
| } |
| |
| static unsigned long fault_address(PTBICTXEXTCB0 pbuf) |
| { |
| return pbuf->CBAddr; |
| } |
| |
| static void unhandled_fault(struct pt_regs *regs, unsigned long addr, |
| int signo, int code, int trapno) |
| { |
| if (user_mode(regs)) { |
| siginfo_t info; |
| |
| if (show_unhandled_signals && unhandled_signal(current, signo) |
| && printk_ratelimit()) { |
| |
| pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n", |
| current->pid, regs->ctx.CurrPC, addr, |
| trapno, trap_name(trapno)); |
| print_vma_addr(" in ", regs->ctx.CurrPC); |
| print_vma_addr(" rtp in ", regs->ctx.DX[4].U1); |
| printk("\n"); |
| show_regs(regs); |
| } |
| |
| info.si_signo = signo; |
| info.si_errno = 0; |
| info.si_code = code; |
| info.si_addr = (__force void __user *)addr; |
| info.si_trapno = trapno; |
| force_sig_info(signo, &info, current); |
| } else { |
| die("Oops", regs, trapno, addr); |
| } |
| } |
| |
| static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs, |
| unsigned int data_address, int trapno) |
| { |
| int ret; |
| |
| ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno); |
| |
| #ifdef CONFIG_SOC_CHORUS2 |
| if (ret == 0) |
| replay_catchbuffer(pcbuf, regs); |
| #endif |
| |
| return ret; |
| } |
| |
| static unsigned long get_inst_fault_address(struct pt_regs *regs) |
| { |
| return regs->ctx.CurrPC; |
| } |
| |
| TBIRES fault_handler(TBIRES State, int SigNum, int Triggers, |
| int Inst, PTBI pTBI) |
| { |
| struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; |
| PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)®s->extcb0; |
| unsigned long data_address; |
| |
| head_end(State, ~INTS_OFF_MASK); |
| |
| /* Hardware breakpoint or data watch */ |
| if ((SigNum == TBIXXF_SIGNUM_IHF) || |
| ((SigNum == TBIXXF_SIGNUM_DHF) && |
| (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT | |
| TXCATCH0_WATCH0_BIT)))) { |
| State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, |
| pTBI); |
| return tail_end(State); |
| } |
| |
| local_irq_enable(); |
| |
| data_address = fault_address(pcbuf); |
| |
| switch (SigNum) { |
| case TBIXXF_SIGNUM_IGF: |
| /* 1st-level entry invalid (instruction fetch) */ |
| case TBIXXF_SIGNUM_IPF: { |
| /* 2nd-level entry invalid (instruction fetch) */ |
| unsigned long addr = get_inst_fault_address(regs); |
| #ifdef CONFIG_SOC_CHORUS2 |
| pr_err("instruction fetch fault at %#lx pid %d comm %s\n", |
| addr, current->pid, current->comm); |
| #endif |
| do_page_fault(regs, addr, 0, SigNum); |
| break; |
| } |
| |
| case TBIXXF_SIGNUM_DGF: |
| /* 1st-level entry invalid (data access) */ |
| case TBIXXF_SIGNUM_DPF: |
| /* 2nd-level entry invalid (data access) */ |
| case TBIXXF_SIGNUM_DWF: |
| /* Write to read only page */ |
| handle_data_fault(pcbuf, regs, data_address, SigNum); |
| break; |
| |
| case TBIXXF_SIGNUM_IIF: |
| /* Illegal instruction */ |
| unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC, |
| SigNum); |
| break; |
| |
| case TBIXXF_SIGNUM_DHF: |
| /* Unaligned access */ |
| unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN, |
| SigNum); |
| break; |
| case TBIXXF_SIGNUM_PGF: |
| /* Privilege violation */ |
| unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR, |
| SigNum); |
| break; |
| default: |
| BUG(); |
| break; |
| } |
| |
| return tail_end(State); |
| } |
| |
| static bool switch_is_syscall(unsigned int inst) |
| { |
| return inst == __METAG_SW_ENCODING(SYS); |
| } |
| |
| static bool switch_is_legacy_syscall(unsigned int inst) |
| { |
| return inst == __METAG_SW_ENCODING(SYS_LEGACY); |
| } |
| |
| static inline void step_over_switch(struct pt_regs *regs, unsigned int inst) |
| { |
| regs->ctx.CurrPC += 4; |
| } |
| |
| static inline int test_syscall_work(void) |
| { |
| return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK; |
| } |
| |
| TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers, |
| int Inst, PTBI pTBI) |
| { |
| struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; |
| unsigned int sysnumber; |
| unsigned long long a1_a2, a3_a4, a5_a6; |
| LPSYSCALL syscall_entry; |
| int restart; |
| |
| head_end(State, ~INTS_OFF_MASK); |
| |
| /* |
| * If this is not a syscall SWITCH it could be a breakpoint. |
| */ |
| if (!switch_is_syscall(Inst)) { |
| /* |
| * Alert the user if they're trying to use legacy system |
| * calls. This suggests they need to update their C |
| * library and build against up to date kernel headers. |
| */ |
| if (switch_is_legacy_syscall(Inst)) |
| pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n"); |
| /* |
| * We don't know how to handle the SWITCH and cannot |
| * safely ignore it, so treat all unknown switches |
| * (including breakpoints) as traps. |
| */ |
| if (notify_die(DIE_TRAP, "debug trap", regs, 0, SigNum, |
| SIGTRAP) != NOTIFY_STOP) |
| force_sig(SIGTRAP, current); |
| return tail_end(State); |
| } |
| |
| local_irq_enable(); |
| |
| restart_syscall: |
| restart = 0; |
| sysnumber = regs->ctx.DX[0].U1; |
| |
| if (test_syscall_work()) |
| sysnumber = syscall_trace_enter(regs); |
| |
| /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */ |
| step_over_switch(regs, Inst); |
| |
| if (sysnumber >= __NR_syscalls) { |
| pr_debug("unknown syscall number: %d\n", sysnumber); |
| syscall_entry = (LPSYSCALL) sys_ni_syscall; |
| } else { |
| syscall_entry = (LPSYSCALL) sys_call_table[sysnumber]; |
| } |
| |
| /* Use 64bit loads for speed. */ |
| a5_a6 = *(unsigned long long *)®s->ctx.DX[1]; |
| a3_a4 = *(unsigned long long *)®s->ctx.DX[2]; |
| a1_a2 = *(unsigned long long *)®s->ctx.DX[3]; |
| |
| /* here is the actual call to the syscall handler functions */ |
| regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6); |
| |
| if (test_syscall_work()) |
| syscall_trace_leave(regs); |
| |
| State = tail_end_sys(State, sysnumber, &restart); |
| /* Handlerless restarts shouldn't go via userland */ |
| if (restart) |
| goto restart_syscall; |
| return State; |
| } |
| |
| TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers, |
| int Inst, PTBI pTBI) |
| { |
| struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; |
| |
| /* |
| * This can be caused by any user process simply executing an unusual |
| * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the |
| * thread to stop, so signal a SIGTRAP instead. |
| */ |
| head_end(State, ~INTS_OFF_MASK); |
| if (user_mode(regs)) |
| force_sig(SIGTRAP, current); |
| else |
| State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI); |
| return tail_end(State); |
| } |
| |
| #ifdef CONFIG_METAG_META21 |
| TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) |
| { |
| struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; |
| unsigned int error_state = Triggers; |
| siginfo_t info; |
| |
| head_end(State, ~INTS_OFF_MASK); |
| |
| local_irq_enable(); |
| |
| info.si_signo = SIGFPE; |
| |
| if (error_state & TXSTAT_FPE_INVALID_BIT) |
| info.si_code = FPE_FLTINV; |
| else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT) |
| info.si_code = FPE_FLTDIV; |
| else if (error_state & TXSTAT_FPE_OVERFLOW_BIT) |
| info.si_code = FPE_FLTOVF; |
| else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT) |
| info.si_code = FPE_FLTUND; |
| else if (error_state & TXSTAT_FPE_INEXACT_BIT) |
| info.si_code = FPE_FLTRES; |
| else |
| info.si_code = 0; |
| info.si_errno = 0; |
| info.si_addr = (__force void __user *)regs->ctx.CurrPC; |
| force_sig_info(SIGFPE, &info, current); |
| |
| return tail_end(State); |
| } |
| #endif |
| |
| #ifdef CONFIG_METAG_SUSPEND_MEM |
| struct traps_context { |
| PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1]; |
| }; |
| |
| static struct traps_context *metag_traps_context; |
| |
| int traps_save_context(void) |
| { |
| unsigned long cpu = smp_processor_id(); |
| PTBI _pTBI = per_cpu(pTBI, cpu); |
| struct traps_context *context; |
| |
| context = kzalloc(sizeof(*context), GFP_ATOMIC); |
| if (!context) |
| return -ENOMEM; |
| |
| memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs)); |
| |
| metag_traps_context = context; |
| return 0; |
| } |
| |
| int traps_restore_context(void) |
| { |
| unsigned long cpu = smp_processor_id(); |
| PTBI _pTBI = per_cpu(pTBI, cpu); |
| struct traps_context *context = metag_traps_context; |
| |
| metag_traps_context = NULL; |
| |
| memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs)); |
| |
| kfree(context); |
| return 0; |
| } |
| #endif |
| |
| #ifdef CONFIG_SMP |
| static inline unsigned int _get_trigger_mask(void) |
| { |
| unsigned long cpu = smp_processor_id(); |
| return per_cpu(trigger_mask, cpu); |
| } |
| |
| unsigned int get_trigger_mask(void) |
| { |
| return _get_trigger_mask(); |
| } |
| EXPORT_SYMBOL(get_trigger_mask); |
| |
| static void set_trigger_mask(unsigned int mask) |
| { |
| unsigned long cpu = smp_processor_id(); |
| per_cpu(trigger_mask, cpu) = mask; |
| } |
| |
/*
 * Enable interrupts by restoring this CPU's saved trigger mask.
 *
 * Preemption is disabled around the per-cpu read so the task cannot
 * migrate between reading the mask and restoring it.
 * NOTE(review): the no-resched variant of preempt_enable() is used
 * here, presumably to avoid an immediate reschedule from the irq
 * path — confirm against the callers' expectations.
 */
void arch_local_irq_enable(void)
{
	preempt_disable();
	arch_local_irq_restore(_get_trigger_mask());
	preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
| #else |
/* UP build: a single global mask stands in for the per-cpu one. */
static void set_trigger_mask(unsigned int mask)
{
	global_trigger_mask = mask;
}
#endif
| |
/*
 * Per-CPU trap initialisation: program the initial trigger mask for
 * this CPU's hardware thread and hand control to the TBI
 * asynchronous-trigger dispatcher.
 *
 * @cpu: logical CPU number being brought up.
 */
void __cpuinit per_cpu_trap_init(unsigned long cpu)
{
	TBIRES int_context;
	unsigned int thread = cpu_2_hwthread_id[cpu];

	/* Interrupts plus the kick/switch signals this kernel always
	 * services.
	 */
	set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
			 TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
			 TBI_TRIG_BIT(TBID_SIGNUM_SW1) |
			 TBI_TRIG_BIT(TBID_SIGNUM_SWS));

	/* non-priv - use current stack */
	int_context.Sig.pCtx = NULL;
	/* Start with interrupts off */
	int_context.Sig.TrigMask = INTS_OFF_MASK;
	int_context.Sig.SaveMask = 0;

	/* And call __TBIASyncTrigger() */
#ifdef CONFIG_METAG_ROM_WRAPPERS
	tbi_vectors[TBI_VEC_ASYNC_TRIGGER] (int_context);
#else
	__TBIASyncTrigger(int_context);
#endif
}
| |
/*
 * Boot-time trap initialisation for the boot CPU: install the arch
 * signal handlers in this thread's TBI function table, then run the
 * common per-CPU setup.
 */
void __init trap_init(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

#ifdef CONFIG_METAG_ROM_WRAPPERS
	unsigned int *paddr;
	int i;

	/* Check to see if there is a ROM based system that we should use
	 * instead of our internal copy.
	 */
	if (!tbi_vector_base)
		panic("This core requires a tbi_vector_base parameter.\n");

	paddr = (unsigned int *)tbi_vector_base;

	/* Override the built-in TBI vectors with any non-zero entries
	 * the ROM provides.
	 */
	for (i = 0; i < ARRAY_SIZE(tbi_vectors); i++) {
		if (paddr[i])
			tbi_vectors[i] = (tbi_ptr) paddr[i];
	}

	/* ROM builds dispatch through the *_wrapper trampolines. */
	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_wrapper;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_wrapper;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_wrapper;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_wrapper;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_wrapper;
	_pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_wrapper;
#else
	/* Non-ROM builds call the handlers directly. */
	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler;
#endif

	/* Meta 2.1 cores additionally have deferred-exception and FPU
	 * exception signals.
	 */
#ifdef CONFIG_METAG_META21
	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
	_pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

	per_cpu_trap_init(cpu);
}
| |
/*
 * Enable delivery of TBI signal number @irq on this CPU: add it to
 * the trigger mask and point its slot in the TBI handler table at
 * the generic trigger handler.
 */
void tbi_startup_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	/* For TR1 and TR2, the thread id is encoded in the irq number */
	if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
		cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

	/*
	 * NOTE(review): the remapped 'cpu' above is never read again -
	 * _pTBI was fetched before the remap, and set_trigger_mask()
	 * operates on the current CPU. Confirm whether the remap was
	 * intended to feed into either of those.
	 */
	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

#ifdef CONFIG_METAG_ROM_WRAPPERS
	_pTBI->fnSigs[irq] = trigger_wrapper;
#else
	_pTBI->fnSigs[irq] = trigger_handler;
#endif
}
| |
/*
 * Disable delivery of TBI signal number @irq on this CPU: remove it
 * from the trigger mask and reset its handler slot back to the TBI
 * "unexpected signal" handler.
 */
void tbi_shutdown_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = __TBIUnExpXXX;
}
| |
/*
 * First code run by a freshly forked task after being scheduled in.
 * Finishes the context switch, runs the thread function for kernel
 * threads, then resumes to the saved register context via the TBI
 * asynchronous-resume mechanism. Never returns.
 *
 * @arg: arg.Switch.pPara carries the previous task from the switch.
 */
int ret_from_fork(TBIRES arg)
{
	struct task_struct *prev = arg.Switch.pPara;
	struct task_struct *tsk = current;
	struct pt_regs *regs = task_pt_regs(tsk);
	int (*fn)(void *);
	TBIRES Next;

	schedule_tail(prev);

	if (tsk->flags & PF_KTHREAD) {
		/* Kernel thread: entry point and argument read from the
		 * child's D-unit registers.
		 * NOTE(review): assumes copy_thread() stashed them in
		 * DX[4].U1 / DX[3].U1 - confirm against copy_thread.
		 */
		fn = (void *)regs->ctx.DX[4].U1;
		BUG_ON(!fn);

		fn((void *)regs->ctx.DX[3].U1);
	}

	/* Report syscall exit to any tracer before returning to user. */
	if (test_syscall_work())
		syscall_trace_leave(regs);

	/* No migration between reading the per-cpu trigger mask and
	 * resuming with it.
	 */
	preempt_disable();

	Next.Sig.TrigMask = get_trigger_mask();
	Next.Sig.SaveMask = 0;
	Next.Sig.pCtx = &regs->ctx;

	set_gateway_tls(current->thread.tls_ptr);

	preempt_enable_no_resched();

	/* And interrupts should come back on when we resume the real usermode
	 * code. Call __TBIASyncResume()
	 */
#ifdef CONFIG_METAG_ROM_WRAPPERS
	tbi_vectors[TBI_VEC_ASYNC_RESUME](tail_end(Next));
#else
	__TBIASyncResume(tail_end(Next));
#endif
	/* ASyncResume should NEVER return */
	BUG();
	return 0;
}
| |
/*
 * Print a kernel backtrace for @tsk (current if NULL). With frame
 * pointers enabled the A0FrP frame chain is walked; otherwise the raw
 * stack is scanned for values that look like kernel text addresses.
 * A user-mode register set produces no trace.
 */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
	unsigned long fp, fpnew;
	unsigned long stack;
#endif

	/* Nothing useful to walk for a user-mode trap context. */
	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	if (!tsk)
		tsk = current;

#ifdef CONFIG_FRAME_POINTER
	if (regs) {
		print_ip_sym(regs->ctx.CurrPC);
		fp = regs->ctx.AX[1].U0;
	} else {
		fp = __core_reg_get(A0FrP);
	}

	/* detect when the frame pointer has been used for other purposes and
	 * doesn't point to the stack (it may point completely elsewhere which
	 * kstack_end may not detect).
	 */
	stack = (unsigned long)task_stack_page(tsk);
	while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
		/* Return address is read from fp + 4; back up 4 bytes
		 * to land within the calling function.
		 */
		addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
		else
			break;
		/* stack grows up, so frame pointers must decrease */
		fpnew = __raw_readl((unsigned long *)(fp + 0));
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
#else
	/* No frame pointers: heuristic scan of the stack for kernel
	 * text addresses.
	 */
	while (!kstack_end(sp)) {
		addr = (*sp--) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
#endif

	printk("\n");

	debug_show_held_locks(tsk);
}
| |
| void show_stack(struct task_struct *tsk, unsigned long *sp) |
| { |
| if (!tsk) |
| tsk = current; |
| if (tsk == current) |
| sp = (unsigned long *)current_stack_pointer; |
| else |
| sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0; |
| |
| show_trace(tsk, sp, NULL); |
| } |