author     James Hogan <james.hogan@imgtec.com>  2012-10-09 04:54:43 -0500
committer  James Hogan <james.hogan@imgtec.com>  2013-03-02 14:09:45 -0600
commit     ac919f0883e53d7785745566692c8a0620abd7ea (patch)
tree       521a0b0ddcab5176a1998d0b7b9faefde6e3f0ae /arch/metag
parent     a2c5d4ed92bbc02ff4a37efc2adffe7d145abe4f (diff)
metag: Traps
Add trap code for metag. At the lowest level Meta traps (and the return-from-interrupt instruction, RTI) simply swap the PC and PCX registers and optionally toggle the interrupt status bit (ISTAT). Low level TBX code in tbipcx.S handles the core context save, determines the TBX signal number based on the core trigger that fired (using the TXSTATI status register), and calls TBX signal handlers (mostly in traps.c) via a vector table.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
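Every handler reached through that vector table has the same shape: it is entered with the TBIRES state handed over by TBX, brackets its work with head_end()/tail_end() from traps.c, and is installed in the per-thread fnSigs[] table. A minimal sketch of that contract (my_handler and the chosen signal number are hypothetical; head_end, tail_end, INTS_OFF_MASK and _pTBI are from the code added below):

        /* Sketch: the shape of a TBX signal handler dispatched from tbipcx.S. */
        static TBIRES my_handler(TBIRES State, int SigNum, int Triggers,
                                 int Inst, PTBI pTBI)
        {
                head_end(State, ~INTS_OFF_MASK);  /* save context, allow nesting */
                /* ... handle the trigger ... */
                return tail_end(State);           /* restore state for the return path */
        }

        /* Installed per CPU, as trap_init() does for the real handlers: */
        _pTBI->fnSigs[TBID_SIGNUM_SW1] = my_handler;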
Diffstat (limited to 'arch/metag')
-rw-r--r--  arch/metag/include/asm/switch.h    21
-rw-r--r--  arch/metag/include/asm/traps.h     48
-rw-r--r--  arch/metag/kernel/kick.c           98
-rw-r--r--  arch/metag/kernel/tbiunexp.S       22
-rw-r--r--  arch/metag/kernel/traps.c         978
5 files changed, 1167 insertions, 0 deletions
diff --git a/arch/metag/include/asm/switch.h b/arch/metag/include/asm/switch.h
new file mode 100644
index 00000000000..1fd6a587c84
--- /dev/null
+++ b/arch/metag/include/asm/switch.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_METAG_SWITCH_H
+#define _ASM_METAG_SWITCH_H
+
+/* metag SWITCH codes */
+#define __METAG_SW_PERM_BREAK 0x400002 /* compiled in breakpoint */
+#define __METAG_SW_SYS_LEGACY 0x440000 /* legacy system calls */
+#define __METAG_SW_SYS        0x440001 /* system calls */
+
+/* metag SWITCH instruction encoding */
+#define __METAG_SW_ENCODING(TYPE) (0xaf000000 | (__METAG_SW_##TYPE))
+
+#endif /* _ASM_METAG_SWITCH_H */
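The macro composes the full 32-bit instruction word from the SWITCH opcode and the subcode, so the syscall SWITCH is 0xaf000000 | 0x440001 == 0xaf440001; this is the value switch_is_syscall() in traps.c below compares the trapping instruction against. A hypothetical compile-time check to illustrate:

        #include <asm/switch.h>

        /* Hypothetical sanity check of the composed instruction word. */
        _Static_assert(__METAG_SW_ENCODING(SYS) == 0xaf440001,
                       "syscall SWITCH instruction word");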
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h
new file mode 100644
index 00000000000..ac808740bd8
--- /dev/null
+++ b/arch/metag/include/asm/traps.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2005,2008 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _METAG_TBIVECTORS_H
+#define _METAG_TBIVECTORS_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/tbx.h>
+
+typedef TBIRES (*kick_irq_func_t)(TBIRES, int, int, int, PTBI, int *);
+
+extern TBIRES kick_handler(TBIRES, int, int, int, PTBI);
+struct kick_irq_handler {
+        struct list_head list;
+        kick_irq_func_t func;
+};
+
+extern void kick_register_func(struct kick_irq_handler *);
+extern void kick_unregister_func(struct kick_irq_handler *);
+
+extern void head_end(TBIRES, unsigned long);
+extern void restart_critical_section(TBIRES State);
+extern TBIRES tail_end_sys(TBIRES, int, int *);
+static inline TBIRES tail_end(TBIRES state)
+{
+        return tail_end_sys(state, -1, NULL);
+}
+
+DECLARE_PER_CPU(PTBI, pTBI);
+extern PTBI pTBI_get(unsigned int);
+
+extern int ret_from_fork(TBIRES arg);
+
+extern int do_page_fault(struct pt_regs *regs, unsigned long address,
+                         unsigned int write_access, unsigned int trapno);
+
+extern TBIRES __TBIUnExpXXX(TBIRES State, int SigNum, int Triggers, int Inst,
+                            PTBI pTBI);
+
+#endif
+
+#endif /* _METAG_TBIVECTORS_H */
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c
new file mode 100644
index 00000000000..c3090962c45
--- /dev/null
+++ b/arch/metag/kernel/kick.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2009 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * The Meta KICK interrupt mechanism is generally a useful feature, so
+ * we provide an interface for registering multiple interrupt
+ * handlers. All the registered interrupt handlers are "chained". When
+ * a KICK interrupt is received the first function in the list is
+ * called. If that interrupt handler cannot handle the KICK the next
+ * one is called, then the next until someone handles it (or we run
+ * out of functions). As soon as one function handles the interrupt no
+ * other handlers are called.
+ *
+ * The only downside of chaining interrupt handlers is that each
+ * handler must be able to detect whether the KICK was intended for it
+ * or not. For example, when the IPI handler runs and it sees that
+ * there are no IPI messages it must not signal that the KICK was
+ * handled, thereby giving the other handlers a chance to run.
+ *
+ * The reason that we provide our own interface for calling KICK
+ * handlers instead of using the generic kernel infrastructure is that
+ * the KICK handlers require access to a CPU's pTBI structure. So we
+ * pass it as an argument.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#include <asm/traps.h>
+
+/*
+ * All accesses/manipulations of kick_handlers_list should be
+ * performed while holding kick_handlers_lock.
+ */
+static DEFINE_SPINLOCK(kick_handlers_lock);
+static LIST_HEAD(kick_handlers_list);
+
+void kick_register_func(struct kick_irq_handler *kh)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&kick_handlers_lock, flags);
+
+        list_add_tail(&kh->list, &kick_handlers_list);
+
+        spin_unlock_irqrestore(&kick_handlers_lock, flags);
+}
+
+void kick_unregister_func(struct kick_irq_handler *kh)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&kick_handlers_lock, flags);
+
+        list_del(&kh->list);
+
+        spin_unlock_irqrestore(&kick_handlers_lock, flags);
+}
+
+TBIRES
+kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
+{
+        struct kick_irq_handler *kh;
+        struct list_head *lh;
+        int handled = 0;
+        TBIRES ret = State;     /* fall back to the incoming state if unhandled */
+
+        head_end(State, ~INTS_OFF_MASK);
+
+        /* If we interrupted user code handle any critical sections. */
+        if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
+                restart_critical_section(State);
+
+        trace_hardirqs_off();
+
+        /*
+         * There is no need to disable interrupts here because we
+         * can't nest KICK interrupts in a KICK interrupt handler.
+         */
+        spin_lock(&kick_handlers_lock);
+
+        list_for_each(lh, &kick_handlers_list) {
+                kh = list_entry(lh, struct kick_irq_handler, list);
+
+                ret = kh->func(State, SigNum, Triggers, Inst, pTBI, &handled);
+                if (handled)
+                        break;
+        }
+
+        spin_unlock(&kick_handlers_lock);
+
+        WARN_ON(!handled);
+
+        return tail_end(ret);
+}
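From a client's point of view, the chaining contract documented at the top of kick.c looks like the following; a minimal sketch against the kick_irq_func_t signature from asm/traps.h (my_kick_pending() and the registration site are hypothetical):

        static TBIRES my_kick(TBIRES State, int SigNum, int Triggers,
                              int Inst, PTBI pTBI, int *handled)
        {
                /* Not our KICK: leave *handled clear so the next handler runs. */
                if (!my_kick_pending())
                        return State;

                /* ... consume the KICK ... */
                *handled = 1;           /* stop the chain here */
                return State;
        }

        static struct kick_irq_handler my_kh = {
                .func = my_kick,
        };

        /* Registration, e.g. from an initcall: */
        kick_register_func(&my_kh);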
diff --git a/arch/metag/kernel/tbiunexp.S b/arch/metag/kernel/tbiunexp.S
new file mode 100644
index 00000000000..907bbe0b2e6
--- /dev/null
+++ b/arch/metag/kernel/tbiunexp.S
@@ -0,0 +1,22 @@
+/* Pass a breakpoint through to Codescape */
+
+#include <asm/tbx.h>
+
+        .text
+        .global ___TBIUnExpXXX
+        .type   ___TBIUnExpXXX,function
+___TBIUnExpXXX:
+        TSTT    D0Ar2,#TBICTX_CRIT_BIT  ! Result of nestable int call?
+        BZ      $LTBINormCase           ! UnExpXXX at background level
+        MOV     D0Re0,TXMASKI           ! Read TXMASKI
+        XOR     TXMASKI,D1Re0,D1Re0     ! Turn off BGNDHALT handling!
+        OR      D0Ar2,D0Ar2,D0Re0       ! Preserve bits cleared
+$LTBINormCase:
+        MSETL   [A0StP],D0Ar6,D0Ar4,D0Ar2 ! Save args on stack
+        SETL    [A0StP++],D0Ar2,D1Ar1   ! Init area for returned values
+        SWITCH  #0xC20208               ! Total stack frame size 8 Dwords
+                                        !       write back size 2 Dwords
+        GETL    D0Re0,D1Re0,[--A0StP]   ! Get result
+        SUB     A0StP,A0StP,#(8*3)      ! Recover stack frame
+        MOV     PC,D1RtP
+        .size   ___TBIUnExpXXX,.-___TBIUnExpXXX
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
new file mode 100644
index 00000000000..1ad363ce1ee
--- /dev/null
+++ b/arch/metag/kernel/traps.c
@@ -0,0 +1,978 @@
+/*
+ * Meta exception handling.
+ *
+ * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#include <linux/kexec.h>
+#include <linux/unistd.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+#include <asm/bug.h>
+#include <asm/core_reg.h>
+#include <asm/irqflags.h>
+#include <asm/siginfo.h>
+#include <asm/traps.h>
+#include <asm/hwthread.h>
+#include <asm/switch.h>
+#include <asm/user_gateway.h>
+#include <asm/syscall.h>
+#include <asm/syscalls.h>
+
+/* Passing syscall arguments as long long is quicker. */
+typedef unsigned int (*LPSYSCALL) (unsigned long long,
+                                   unsigned long long,
+                                   unsigned long long);
+
+/*
+ * Users of LNKSET should only compare the bus error bits obtained from DEFR
+ * against TXDEFR_LNKSET_SUCCESS, as the failure code will vary between
+ * different core revisions.
+ */
+#define TXDEFR_LNKSET_SUCCESS 0x02000000
+#define TXDEFR_LNKSET_FAILURE 0x04000000
+
+/*
+ * Our global TBI handle. Initialised from setup.c/setup_arch.
+ */
+DECLARE_PER_CPU(PTBI, pTBI);
+
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(unsigned int, trigger_mask);
+#else
+unsigned int global_trigger_mask;
+#endif
+
+unsigned long per_cpu__stack_save[NR_CPUS];
+
+static const char * const trap_names[] = {
+        [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
+        [TBIXXF_SIGNUM_PGF] = "Privilege violation",
+        [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
+        [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
+        [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
+        [TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
+        [TBIXXF_SIGNUM_DPF] = "Data access page fault",
+        [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
+        [TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
+};
+
+const char *trap_name(int trapno)
+{
+        if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
+            && trap_names[trapno])
+                return trap_names[trapno];
+        return "Unknown fault";
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char *str, struct pt_regs *regs, long err,
+         unsigned long addr)
+{
+        static int die_counter;
+
+        oops_enter();
+
+        spin_lock_irq(&die_lock);
+        console_verbose();
+        bust_spinlocks(1);
+        pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
+               trap_name(err & 0xffff), addr, ++die_counter);
+
+        print_modules();
+        show_regs(regs);
+
+        pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+               task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);
+
+        bust_spinlocks(0);
+        add_taint(TAINT_DIE);
+        if (kexec_should_crash(current))
+                crash_kexec(regs);
+
+        if (in_interrupt())
+                panic("Fatal exception in interrupt");
+
+        if (panic_on_oops)
+                panic("Fatal exception");
+
+        spin_unlock_irq(&die_lock);
+        oops_exit();
+        do_exit(SIGSEGV);
+}
+
+#ifdef CONFIG_METAG_DSP
+/*
+ * The ECH encoding specifies the size of a DSPRAM as,
+ *
+ *              "slots" / 4
+ *
+ * A "slot" is the size of two DSPRAM bank entries; an entry from
+ * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
+ * entry is 4 bytes.
+ */
+#define SLOT_SZ 8
+static inline unsigned int decode_dspram_size(unsigned int size)
+{
+        unsigned int _sz = size & 0x7f;
+
+        return _sz * SLOT_SZ * 4;
+}
+
+static void dspram_save(struct meta_ext_context *dsp_ctx,
+                        unsigned int ramA_sz, unsigned int ramB_sz)
+{
+        unsigned int ram_sz[2];
+        int i;
+
+        ram_sz[0] = ramA_sz;
+        ram_sz[1] = ramB_sz;
+
+        for (i = 0; i < 2; i++) {
+                if (ram_sz[i] != 0) {
+                        unsigned int sz;
+
+                        if (i == 0)
+                                sz = decode_dspram_size(ram_sz[i] >> 8);
+                        else
+                                sz = decode_dspram_size(ram_sz[i]);
+
+                        if (dsp_ctx->ram[i] == NULL) {
+                                dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);
+
+                                if (dsp_ctx->ram[i] == NULL)
+                                        panic("couldn't save DSP context");
+                        } else {
+                                if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
+                                        kfree(dsp_ctx->ram[i]);
+
+                                        dsp_ctx->ram[i] = kmalloc(sz,
+                                                                  GFP_KERNEL);
+
+                                        if (dsp_ctx->ram[i] == NULL)
+                                                panic("couldn't save DSP context");
+                                }
+                        }
+
+                        if (i == 0)
+                                __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
+                        else
+                                __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);
+
+                        dsp_ctx->ram_sz[i] = ram_sz[i];
+                }
+        }
+}
+#endif /* CONFIG_METAG_DSP */
+
+/*
+ * Allow interrupts to be nested and save any "extended" register
+ * context state, e.g. DSP regs and RAMs.
+ */
+static void nest_interrupts(TBIRES State, unsigned long mask)
+{
+#ifdef CONFIG_METAG_DSP
+        struct meta_ext_context *dsp_ctx;
+        unsigned int D0_8;
+
+        /*
+         * D0.8 may contain an ECH encoding. The upper 16 bits
+         * tell us what DSP resources the current process is
+         * using. OR the bits into the SaveMask so that
+         * __TBINestInts() knows what resources to save as
+         * part of this context.
+         *
+         * Don't save the context if we're nesting interrupts in the
+         * kernel because the kernel doesn't use DSP hardware.
+         */
+        D0_8 = __core_reg_get(D0.8);
+
+        if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
+                State.Sig.SaveMask |= (D0_8 >> 16);
+
+                dsp_ctx = current->thread.dsp_context;
+                if (dsp_ctx == NULL) {
+                        dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
+                        if (dsp_ctx == NULL)
+                                panic("couldn't save DSP context: ENOMEM");
+
+                        current->thread.dsp_context = dsp_ctx;
+                }
+
+                current->thread.user_flags |= (D0_8 & 0xffff0000);
+                __TBINestInts(State, &dsp_ctx->regs, mask);
+                dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
+        } else
+                __TBINestInts(State, NULL, mask);
+#else
+        __TBINestInts(State, NULL, mask);
+#endif
+}
+
+void head_end(TBIRES State, unsigned long mask)
+{
+        unsigned int savemask = (unsigned short)State.Sig.SaveMask;
+        unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;
+
+        if (savemask & TBICTX_PRIV_BIT) {
+                ctx_savemask |= TBICTX_PRIV_BIT;
+                current->thread.user_flags = savemask;
+        }
+
+        /* Always undo the sleep bit */
+        ctx_savemask &= ~TBICTX_WAIT_BIT;
+
+        /* Always save the catch buffer and RD pipe if they are dirty */
+        savemask |= TBICTX_XCBF_BIT;
+
+        /* Only save the catch and RD if we have not already done so.
+         * Note - the RD bits are in the pCtx only, and not in the
+         * State.SaveMask.
+         */
+        if ((savemask & TBICTX_CBUF_BIT) ||
+            (ctx_savemask & TBICTX_CBRP_BIT)) {
+                /* Have we already saved the buffers though?
+                 * - See TestTrack 5071 */
+                if (ctx_savemask & TBICTX_XCBF_BIT) {
+                        /* Strip off the bits so the call to __TBINestInts
+                         * won't save the buffers again. */
+                        savemask &= ~TBICTX_CBUF_BIT;
+                        ctx_savemask &= ~TBICTX_CBRP_BIT;
+                }
+        }
+
+#ifdef CONFIG_METAG_META21
+        {
+                unsigned int depth, txdefr;
+
+                /*
+                 * Save TXDEFR state.
+                 *
+                 * The process may have been interrupted after a LNKSET, but
+                 * before it could read the DEFR state, so we mustn't lose that
+                 * state or it could end up retrying an atomic operation that
+                 * succeeded.
+                 *
+                 * All interrupts are disabled at this point so we
+                 * don't need to perform any locking. We must do this
+                 * dance before we use LNKGET or LNKSET.
+                 */
+                BUG_ON(current->thread.int_depth > HARDIRQ_BITS);
+
+                depth = current->thread.int_depth++;
+
+                txdefr = __core_reg_get(TXDEFR);
+
+                txdefr &= TXDEFR_BUS_STATE_BITS;
+                if (txdefr & TXDEFR_LNKSET_SUCCESS)
+                        current->thread.txdefr_failure &= ~(1 << depth);
+                else
+                        current->thread.txdefr_failure |= (1 << depth);
+        }
+#endif
+
+        State.Sig.SaveMask = savemask;
+        State.Sig.pCtx->SaveMask = ctx_savemask;
+
+        nest_interrupts(State, mask);
+
+#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
+        /* Poison the catch registers. This shows up any mistakes we have
+         * made in their handling MUCH quicker.
+         */
+        __core_reg_set(TXCATCH0, 0x87650021);
+        __core_reg_set(TXCATCH1, 0x87654322);
+        __core_reg_set(TXCATCH2, 0x87654323);
+        __core_reg_set(TXCATCH3, 0x87654324);
+#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
+}
+
+TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
+{
+        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+        unsigned long flags;
+
+        local_irq_disable();
+
+        if (user_mode(regs)) {
+                flags = current_thread_info()->flags;
+                if (flags & _TIF_WORK_MASK &&
+                    do_work_pending(regs, flags, syscall)) {
+                        *restart = 1;
+                        return State;
+                }
+
+#ifdef CONFIG_METAG_FPU
+                if (current->thread.fpu_context &&
+                    current->thread.fpu_context->needs_restore) {
+                        __TBICtxFPURestore(State, current->thread.fpu_context);
+                        /*
+                         * Clearing this bit ensures the FP unit is not made
+                         * active again unless it is used.
+                         */
+                        State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
+                        current->thread.fpu_context->needs_restore = false;
+                }
+                State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
+#endif
+        }
+
+        /* TBI will turn interrupts back on at some point. */
+        if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
+                trace_hardirqs_on();
+
+#ifdef CONFIG_METAG_DSP
+        /*
+         * If we previously saved an extended context then restore it
+         * now. Otherwise, clear D0.8 because this process is not
+         * using DSP hardware.
+         */
+        if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
+                unsigned int D0_8;
+                struct meta_ext_context *dsp_ctx = current->thread.dsp_context;
+
+                /* Make sure we're going to return to userland. */
+                BUG_ON(current->thread.int_depth != 1);
+
+                if (dsp_ctx->ram_sz[0] > 0)
+                        __TBIDspramRestoreA(dsp_ctx->ram_sz[0],
+                                            dsp_ctx->ram[0]);
+                if (dsp_ctx->ram_sz[1] > 0)
+                        __TBIDspramRestoreB(dsp_ctx->ram_sz[1],
+                                            dsp_ctx->ram[1]);
+
+                State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
+                __TBICtxRestore(State, current->thread.dsp_context);
+                D0_8 = __core_reg_get(D0.8);
+                D0_8 |= current->thread.user_flags & 0xffff0000;
+                D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
+                __core_reg_set(D0.8, D0_8);
+        } else
+                __core_reg_set(D0.8, 0);
+#endif /* CONFIG_METAG_DSP */
+
+#ifdef CONFIG_METAG_META21
+        {
+                unsigned int depth, txdefr;
+
+                /*
+                 * If there hasn't been a LNKSET since the last LNKGET then the
+                 * link flag will be set, causing the next LNKSET to succeed if
+                 * the addresses match. The two LNK operations may not be a pair
+                 * (e.g. see atomic_read()), so the LNKSET should fail.
+                 * We use a conditional-never LNKSET to clear the link flag
+                 * without side effects.
+                 */
+                asm volatile("LNKSETDNV [D0Re0],D0Re0");
+
+                depth = --current->thread.int_depth;
+
+                BUG_ON(user_mode(regs) && depth);
+
+                txdefr = __core_reg_get(TXDEFR);
+
+                txdefr &= ~TXDEFR_BUS_STATE_BITS;
+
+                /* Do we need to restore a failure code into TXDEFR? */
+                if (current->thread.txdefr_failure & (1 << depth))
+                        txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
+                else
+                        txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);
+
+                __core_reg_set(TXDEFR, txdefr);
+        }
+#endif
+        return State;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * If we took an interrupt in the middle of __kuser_get_tls then we need
+ * to rewind the PC to the start of the function in case the process
+ * gets migrated to another thread (SMP only) and it reads the wrong tls
+ * data.
+ */
+static inline void _restart_critical_section(TBIRES State)
+{
+        unsigned long get_tls_start;
+        unsigned long get_tls_end;
+
+        get_tls_start = (unsigned long)__kuser_get_tls -
+                (unsigned long)&__user_gateway_start;
+
+        get_tls_start += USER_GATEWAY_PAGE;
+
+        get_tls_end = (unsigned long)__kuser_get_tls_end -
+                (unsigned long)&__user_gateway_start;
+
+        get_tls_end += USER_GATEWAY_PAGE;
+
+        if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
+            (State.Sig.pCtx->CurrPC < get_tls_end))
+                State.Sig.pCtx->CurrPC = get_tls_start;
+}
+#else
+/*
+ * If we took an interrupt in the middle of
+ * __kuser_cmpxchg then we need to rewind the PC to the
+ * start of the function.
+ */
+static inline void _restart_critical_section(TBIRES State)
+{
+        unsigned long cmpxchg_start;
+        unsigned long cmpxchg_end;
+
+        cmpxchg_start = (unsigned long)__kuser_cmpxchg -
+                (unsigned long)&__user_gateway_start;
+
+        cmpxchg_start += USER_GATEWAY_PAGE;
+
+        cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
+                (unsigned long)&__user_gateway_start;
+
+        cmpxchg_end += USER_GATEWAY_PAGE;
+
+        if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
+            (State.Sig.pCtx->CurrPC < cmpxchg_end))
+                State.Sig.pCtx->CurrPC = cmpxchg_start;
+}
+#endif
+
+/* Used by kick_handler() */
+void restart_critical_section(TBIRES State)
+{
+        _restart_critical_section(State);
+}
+
+TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
+                       PTBI pTBI)
+{
+        head_end(State, ~INTS_OFF_MASK);
+
+        /* If we interrupted user code handle any critical sections. */
+        if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
+                _restart_critical_section(State);
+
+        trace_hardirqs_off();
+
+        do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);
+
+        return tail_end(State);
+}
+
+static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
+{
+        return pbuf->CBFlags & TXCATCH0_READ_BIT;
+}
+
+static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
+{
+        return pbuf->CBAddr;
+}
+
+static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
+                            int signo, int code, int trapno)
+{
+        if (user_mode(regs)) {
+                siginfo_t info;
+
+                if (show_unhandled_signals && unhandled_signal(current, signo)
+                    && printk_ratelimit()) {
+
+                        pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
+                                current->pid, regs->ctx.CurrPC, addr,
+                                trapno, trap_name(trapno));
+                        print_vma_addr(" in ", regs->ctx.CurrPC);
+                        print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
+                        printk("\n");
+                        show_regs(regs);
+                }
+
+                info.si_signo = signo;
+                info.si_errno = 0;
+                info.si_code = code;
+                info.si_addr = (__force void __user *)addr;
+                info.si_trapno = trapno;
+                force_sig_info(signo, &info, current);
+        } else {
+                die("Oops", regs, trapno, addr);
+        }
+}
+
+static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
+                             unsigned int data_address, int trapno)
+{
+        int ret;
+
+        ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);
+
+        return ret;
+}
+
+static unsigned long get_inst_fault_address(struct pt_regs *regs)
+{
+        return regs->ctx.CurrPC;
+}
+
+TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
+                     int Inst, PTBI pTBI)
+{
+        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+        PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
+        unsigned long data_address;
+
+        head_end(State, ~INTS_OFF_MASK);
+
+        /* Hardware breakpoint or data watch */
+        if ((SigNum == TBIXXF_SIGNUM_IHF) ||
+            ((SigNum == TBIXXF_SIGNUM_DHF) &&
+             (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
+                                  TXCATCH0_WATCH0_BIT)))) {
+                State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
+                                      pTBI);
+                return tail_end(State);
+        }
+
+        local_irq_enable();
+
+        data_address = fault_address(pcbuf);
+
+        switch (SigNum) {
+        case TBIXXF_SIGNUM_IGF:
+                /* 1st-level entry invalid (instruction fetch) */
+        case TBIXXF_SIGNUM_IPF: {
+                /* 2nd-level entry invalid (instruction fetch) */
+                unsigned long addr = get_inst_fault_address(regs);
+                do_page_fault(regs, addr, 0, SigNum);
+                break;
+        }
+
+        case TBIXXF_SIGNUM_DGF:
+                /* 1st-level entry invalid (data access) */
+        case TBIXXF_SIGNUM_DPF:
+                /* 2nd-level entry invalid (data access) */
+        case TBIXXF_SIGNUM_DWF:
+                /* Write to read only page */
+                handle_data_fault(pcbuf, regs, data_address, SigNum);
+                break;
+
+        case TBIXXF_SIGNUM_IIF:
+                /* Illegal instruction */
+                unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
+                                SigNum);
+                break;
+
+        case TBIXXF_SIGNUM_DHF:
+                /* Unaligned access */
+                unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
+                                SigNum);
+                break;
+        case TBIXXF_SIGNUM_PGF:
+                /* Privilege violation */
+                unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
+                                SigNum);
+                break;
+        default:
+                BUG();
+                break;
+        }
+
+        return tail_end(State);
+}
+
+static bool switch_is_syscall(unsigned int inst)
+{
+        return inst == __METAG_SW_ENCODING(SYS);
+}
+
+static bool switch_is_legacy_syscall(unsigned int inst)
+{
+        return inst == __METAG_SW_ENCODING(SYS_LEGACY);
+}
+
+static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
+{
+        regs->ctx.CurrPC += 4;
+}
+
+static inline int test_syscall_work(void)
+{
+        return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
+}
+
+TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
+                       int Inst, PTBI pTBI)
+{
+        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+        unsigned int sysnumber;
+        unsigned long long a1_a2, a3_a4, a5_a6;
+        LPSYSCALL syscall_entry;
+        int restart;
+
+        head_end(State, ~INTS_OFF_MASK);
+
+        /*
+         * If this is not a syscall SWITCH it could be a breakpoint.
+         */
+        if (!switch_is_syscall(Inst)) {
+                /*
+                 * Alert the user if they're trying to use legacy system
+                 * calls. This suggests they need to update their C
+                 * library and build against up to date kernel headers.
+                 */
+                if (switch_is_legacy_syscall(Inst))
+                        pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
+                /*
+                 * We don't know how to handle the SWITCH and cannot
+                 * safely ignore it, so treat all unknown switches
+                 * (including breakpoints) as traps.
+                 */
+                force_sig(SIGTRAP, current);
+                return tail_end(State);
+        }
+
+        local_irq_enable();
+
+restart_syscall:
+        restart = 0;
+        sysnumber = regs->ctx.DX[0].U1;
+
+        if (test_syscall_work())
+                sysnumber = syscall_trace_enter(regs);
+
+        /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
+        step_over_switch(regs, Inst);
+
+        if (sysnumber >= __NR_syscalls) {
+                pr_debug("unknown syscall number: %d\n", sysnumber);
+                syscall_entry = (LPSYSCALL) sys_ni_syscall;
+        } else {
+                syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
+        }
+
+        /* Use 64bit loads for speed. */
+        a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
+        a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
+        a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];
+
+        /* here is the actual call to the syscall handler functions */
+        regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);
+
+        if (test_syscall_work())
+                syscall_trace_leave(regs);
+
+        State = tail_end_sys(State, sysnumber, &restart);
+        /* Handlerless restarts shouldn't go via userland */
+        if (restart)
+                goto restart_syscall;
+        return State;
+}
+
+TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
+                       int Inst, PTBI pTBI)
+{
+        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+
+        /*
+         * This can be caused by any user process simply executing an unusual
+         * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
+         * thread to stop, so signal a SIGTRAP instead.
+         */
+        head_end(State, ~INTS_OFF_MASK);
+        if (user_mode(regs))
+                force_sig(SIGTRAP, current);
+        else
+                State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
+        return tail_end(State);
+}
+
+#ifdef CONFIG_METAG_META21
+TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
+{
+        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+        unsigned int error_state = Triggers;
+        siginfo_t info;
+
+        head_end(State, ~INTS_OFF_MASK);
+
+        local_irq_enable();
+
+        info.si_signo = SIGFPE;
+
+        if (error_state & TXSTAT_FPE_INVALID_BIT)
+                info.si_code = FPE_FLTINV;
+        else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
+                info.si_code = FPE_FLTDIV;
+        else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
+                info.si_code = FPE_FLTOVF;
+        else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
+                info.si_code = FPE_FLTUND;
+        else if (error_state & TXSTAT_FPE_INEXACT_BIT)
+                info.si_code = FPE_FLTRES;
+        else
+                info.si_code = 0;
+        info.si_errno = 0;
+        info.si_addr = (__force void __user *)regs->ctx.CurrPC;
+        force_sig_info(SIGFPE, &info, current);
+
+        return tail_end(State);
+}
+#endif
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+struct traps_context {
+        PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
+};
+
+static struct traps_context *metag_traps_context;
+
+int traps_save_context(void)
+{
+        unsigned long cpu = smp_processor_id();
+        PTBI _pTBI = per_cpu(pTBI, cpu);
+        struct traps_context *context;
+
+        context = kzalloc(sizeof(*context), GFP_ATOMIC);
+        if (!context)
+                return -ENOMEM;
+
+        memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));
+
+        metag_traps_context = context;
+        return 0;
+}
+
+int traps_restore_context(void)
+{
+        unsigned long cpu = smp_processor_id();
+        PTBI _pTBI = per_cpu(pTBI, cpu);
+        struct traps_context *context = metag_traps_context;
+
+        metag_traps_context = NULL;
+
+        memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));
+
+        kfree(context);
+        return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+unsigned int get_trigger_mask(void)
+{
+        unsigned long cpu = smp_processor_id();
+        return per_cpu(trigger_mask, cpu);
+}
+
+static void set_trigger_mask(unsigned int mask)
+{
+        unsigned long cpu = smp_processor_id();
+        per_cpu(trigger_mask, cpu) = mask;
+}
+#else
+static void set_trigger_mask(unsigned int mask)
+{
+        global_trigger_mask = mask;
+}
+#endif
+
+void __cpuinit per_cpu_trap_init(unsigned long cpu)
+{
+        TBIRES int_context;
+        unsigned int thread = cpu_2_hwthread_id[cpu];
+
+        set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
+                         TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
+                         TBI_TRIG_BIT(TBID_SIGNUM_SW1) |
+                         TBI_TRIG_BIT(TBID_SIGNUM_SWS));
+
+        /* non-priv - use current stack */
+        int_context.Sig.pCtx = NULL;
+        /* Start with interrupts off */
+        int_context.Sig.TrigMask = INTS_OFF_MASK;
+        int_context.Sig.SaveMask = 0;
+
+        /* And call __TBIASyncTrigger() */
+        __TBIASyncTrigger(int_context);
+}
+
+void __init trap_init(void)
+{
+        unsigned long cpu = smp_processor_id();
+        PTBI _pTBI = per_cpu(pTBI, cpu);
+
+        _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
+        _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
+        _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
+        _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
+        _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
+        _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler;
+
+#ifdef CONFIG_METAG_META21
+        _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
+        _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
+#endif
+
+        per_cpu_trap_init(cpu);
+}
+
+void tbi_startup_interrupt(int irq)
+{
+        unsigned long cpu = smp_processor_id();
+        PTBI _pTBI = per_cpu(pTBI, cpu);
+
+        BUG_ON(irq > TBID_SIGNUM_MAX);
+
+        /* For TR1 and TR2, the thread id is encoded in the irq number */
+        if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
+                cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];
+
+        set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));
+
+        _pTBI->fnSigs[irq] = trigger_handler;
+}
+
+void tbi_shutdown_interrupt(int irq)
+{
+        unsigned long cpu = smp_processor_id();
+        PTBI _pTBI = per_cpu(pTBI, cpu);
+
+        BUG_ON(irq > TBID_SIGNUM_MAX);
+
+        set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));
+
+        _pTBI->fnSigs[irq] = __TBIUnExpXXX;
+}
+
+int ret_from_fork(TBIRES arg)
+{
+        struct task_struct *prev = arg.Switch.pPara;
+        struct task_struct *tsk = current;
+        struct pt_regs *regs = task_pt_regs(tsk);
+        int (*fn)(void *);
+        TBIRES Next;
+
+        schedule_tail(prev);
+
+        if (tsk->flags & PF_KTHREAD) {
+                fn = (void *)regs->ctx.DX[4].U1;
+                BUG_ON(!fn);
+
+                fn((void *)regs->ctx.DX[3].U1);
+        }
+
+        if (test_syscall_work())
+                syscall_trace_leave(regs);
+
+        preempt_disable();
+
+        Next.Sig.TrigMask = get_trigger_mask();
+        Next.Sig.SaveMask = 0;
+        Next.Sig.pCtx = &regs->ctx;
+
+        set_gateway_tls(current->thread.tls_ptr);
+
+        preempt_enable_no_resched();
+
+        /* And interrupts should come back on when we resume the real usermode
+         * code. Call __TBIASyncResume()
+         */
+        __TBIASyncResume(tail_end(Next));
+        /* ASyncResume should NEVER return */
+        BUG();
+        return 0;
+}
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+                struct pt_regs *regs)
+{
+        unsigned long addr;
+#ifdef CONFIG_FRAME_POINTER
+        unsigned long fp, fpnew;
+        unsigned long stack;
+#endif
+
+        if (regs && user_mode(regs))
+                return;
+
+        printk("\nCall trace: ");
+#ifdef CONFIG_KALLSYMS
+        printk("\n");
+#endif
+
+        if (!tsk)
+                tsk = current;
+
+#ifdef CONFIG_FRAME_POINTER
+        if (regs) {
+                print_ip_sym(regs->ctx.CurrPC);
+                fp = regs->ctx.AX[1].U0;
+        } else {
+                fp = __core_reg_get(A0FrP);
+        }
+
+        /* detect when the frame pointer has been used for other purposes and
+         * doesn't point to the stack (it may point completely elsewhere which
+         * kstack_end may not detect).
+         */
+        stack = (unsigned long)task_stack_page(tsk);
+        while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
+                addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
+                if (kernel_text_address(addr))
+                        print_ip_sym(addr);
+                else
+                        break;
+                /* stack grows up, so frame pointers must decrease */
+                fpnew = __raw_readl((unsigned long *)(fp + 0));
+                if (fpnew >= fp)
+                        break;
+                fp = fpnew;
+        }
+#else
+        while (!kstack_end(sp)) {
+                addr = (*sp--) - 4;
+                if (kernel_text_address(addr))
+                        print_ip_sym(addr);
+        }
+#endif
+
+        printk("\n");
+
+        debug_show_held_locks(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+        if (!tsk)
+                tsk = current;
+        if (tsk == current)
+                sp = (unsigned long *)current_stack_pointer;
+        else
+                sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;
+
+        show_trace(tsk, sp, NULL);
+}
+
+void dump_stack(void)
+{
+        show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);