mirror of
https://github.com/ukui/kernel.git
synced 2026-03-09 10:07:04 -07:00
[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 3
The attached patches provides part 3 of an architecture implementation for the Tensilica Xtensa CPU series. Signed-off-by: Chris Zankel <chris@zankel.net> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
committed by
Linus Torvalds
parent
4bedea9454
commit
5a0015d626
18
arch/xtensa/kernel/Makefile
Normal file
18
arch/xtensa/kernel/Makefile
Normal file
@@ -0,0 +1,18 @@
|
||||
#
|
||||
# Makefile for the Linux/Xtensa kernel.
|
||||
#
|
||||
|
||||
extra-y := head.o vmlinux.lds
|
||||
|
||||
|
||||
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
|
||||
setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
|
||||
pci-dma.o
|
||||
|
||||
## windowspill.o
|
||||
|
||||
obj-$(CONFIG_KGDB) += xtensa-stub.o
|
||||
obj-$(CONFIG_PCI) += pci.o
|
||||
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
|
||||
|
||||
|
||||
459
arch/xtensa/kernel/align.S
Normal file
459
arch/xtensa/kernel/align.S
Normal file
@@ -0,0 +1,459 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/align.S
|
||||
*
|
||||
* Handle unalignment exceptions in kernel space.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General
|
||||
* Public License. See the file "COPYING" in the main directory of
|
||||
* this archive for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica, Inc.
|
||||
*
|
||||
* Rewritten by Chris Zankel <chris@zankel.net>
|
||||
*
|
||||
* Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/current.h>
|
||||
#include <asm/offsets.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
|
||||
|
||||
/* First-level exception handler for unaligned exceptions.
|
||||
*
|
||||
* Note: This handler works only for kernel exceptions. Unaligned user
|
||||
* access should get a seg fault.
|
||||
*/
|
||||
|
||||
/* Big and little endian 16-bit values are located in
|
||||
* different halves of a register. HWORD_START helps to
|
||||
* abstract the notion of extracting a 16-bit value from a
|
||||
* register.
|
||||
* We also have to define new shifting instructions because
|
||||
* lsb and msb are on 'opposite' ends in a register for
|
||||
* different endian machines.
|
||||
*
|
||||
* Assume a memory region in ascending address:
|
||||
* 0 1 2 3|4 5 6 7
|
||||
*
|
||||
* When loading one word into a register, the content of that register is:
|
||||
* LE 3 2 1 0, 7 6 5 4
|
||||
* BE 0 1 2 3, 4 5 6 7
|
||||
*
|
||||
* Masking the bits of the higher/lower address means:
|
||||
* LE X X 0 0, 0 0 X X
|
||||
* BE 0 0 X X, X X 0 0
|
||||
*
|
||||
* Shifting to higher/lower addresses, means:
|
||||
* LE shift left / shift right
|
||||
* BE shift right / shift left
|
||||
*
|
||||
* Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
|
||||
* LE mask 0 0 X X / shift left
|
||||
* BE shift left / mask 0 0 X X
|
||||
*/
|
||||
|
||||
#define UNALIGNED_USER_EXCEPTION
|
||||
|
||||
#if XCHAL_HAVE_BE
|
||||
|
||||
#define HWORD_START 16
|
||||
#define INSN_OP0 28
|
||||
#define INSN_T 24
|
||||
#define INSN_OP1 16
|
||||
|
||||
.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
|
||||
.macro __ssa8 r; ssa8b \r; .endm
|
||||
.macro __ssa8r r; ssa8l \r; .endm
|
||||
.macro __sh r, s; srl \r, \s; .endm
|
||||
.macro __sl r, s; sll \r, \s; .endm
|
||||
.macro __exth r, s; extui \r, \s, 0, 16; .endm
|
||||
.macro __extl r, s; slli \r, \s, 16; .endm
|
||||
|
||||
#else
|
||||
|
||||
#define HWORD_START 0
|
||||
#define INSN_OP0 0
|
||||
#define INSN_T 4
|
||||
#define INSN_OP1 12
|
||||
|
||||
.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
|
||||
.macro __ssa8 r; ssa8l \r; .endm
|
||||
.macro __ssa8r r; ssa8b \r; .endm
|
||||
.macro __sh r, s; sll \r, \s; .endm
|
||||
.macro __sl r, s; srl \r, \s; .endm
|
||||
.macro __exth r, s; slli \r, \s, 16; .endm
|
||||
.macro __extl r, s; extui \r, \s, 0, 16; .endm
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* xxxx xxxx = imm8 field
|
||||
* yyyy = imm4 field
|
||||
* ssss = s field
|
||||
* tttt = t field
|
||||
*
|
||||
* 16 0
|
||||
* -------------------
|
||||
* L32I.N yyyy ssss tttt 1000
|
||||
* S32I.N yyyy ssss tttt 1001
|
||||
*
|
||||
* 23 0
|
||||
* -----------------------------
|
||||
* res 0000 0010
|
||||
* L16UI xxxx xxxx 0001 ssss tttt 0010
|
||||
* L32I xxxx xxxx 0010 ssss tttt 0010
|
||||
* XXX 0011 ssss tttt 0010
|
||||
* XXX 0100 ssss tttt 0010
|
||||
* S16I xxxx xxxx 0101 ssss tttt 0010
|
||||
* S32I xxxx xxxx 0110 ssss tttt 0010
|
||||
* XXX 0111 ssss tttt 0010
|
||||
* XXX 1000 ssss tttt 0010
|
||||
* L16SI xxxx xxxx 1001 ssss tttt 0010
|
||||
* XXX 1010 0010
|
||||
* **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
|
||||
* XXX 1100 0010
|
||||
* XXX 1101 0010
|
||||
* XXX 1110 0010
|
||||
* **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
|
||||
* -----------------------------
|
||||
* ^ ^ ^
|
||||
* sub-opcode (NIBBLE_R) -+ | |
|
||||
* t field (NIBBLE_T) -----------+ |
|
||||
* major opcode (NIBBLE_OP0) --------------+
|
||||
*/
|
||||
|
||||
#define OP0_L32I_N 0x8 /* load immediate narrow */
|
||||
#define OP0_S32I_N 0x9 /* store immediate narrow */
|
||||
#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
|
||||
#define OP1_SI_BIT 2 /* OP1 bit number for stores */
|
||||
|
||||
#define OP1_L32I 0x2
|
||||
#define OP1_L16UI 0x1
|
||||
#define OP1_L16SI 0x9
|
||||
#define OP1_L32AI 0xb
|
||||
|
||||
#define OP1_S32I 0x6
|
||||
#define OP1_S16I 0x5
|
||||
#define OP1_S32RI 0xf
|
||||
|
||||
/*
|
||||
* Entry condition:
|
||||
*
|
||||
* a0: trashed, original value saved on stack (PT_AREG0)
|
||||
* a1: a1
|
||||
* a2: new stack pointer, original in DEPC
|
||||
* a3: dispatch table
|
||||
* depc: a2, original value saved on stack (PT_DEPC)
|
||||
* excsave_1: a3
|
||||
*
|
||||
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
|
||||
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
|
||||
*/
|
||||
|
||||
|
||||
ENTRY(fast_unaligned)
|
||||
|
||||
/* Note: We don't expect the address to be aligned on a word
|
||||
* boundary. After all, the processor generated that exception
|
||||
* and it would be a hardware fault.
|
||||
*/
|
||||
|
||||
/* Save some working register */
|
||||
|
||||
s32i a4, a2, PT_AREG4
|
||||
s32i a5, a2, PT_AREG5
|
||||
s32i a6, a2, PT_AREG6
|
||||
s32i a7, a2, PT_AREG7
|
||||
s32i a8, a2, PT_AREG8
|
||||
|
||||
rsr a0, DEPC
|
||||
xsr a3, EXCSAVE_1
|
||||
s32i a0, a2, PT_AREG2
|
||||
s32i a3, a2, PT_AREG3
|
||||
|
||||
/* Keep value of SAR in a0 */
|
||||
|
||||
rsr a0, SAR
|
||||
rsr a8, EXCVADDR # load unaligned memory address
|
||||
|
||||
/* Now, identify one of the following load/store instructions.
|
||||
*
|
||||
* The only possible danger of a double exception on the
|
||||
* following l32i instructions is kernel code in vmalloc
|
||||
* memory. The processor was just executing at the EPC_1
|
||||
* address, and indeed, already fetched the instruction. That
|
||||
* guarantees a TLB mapping, which hasn't been replaced by
|
||||
* this unaligned exception handler that uses only static TLB
|
||||
* mappings. However, high-level interrupt handlers might
|
||||
* modify TLB entries, so for the generic case, we register a
|
||||
* TABLE_FIXUP handler here, too.
|
||||
*/
|
||||
|
||||
/* a3...a6 saved on stack, a2 = SP */
|
||||
|
||||
/* Extract the instruction that caused the unaligned access. */
|
||||
|
||||
rsr a7, EPC_1 # load exception address
|
||||
movi a3, ~3
|
||||
and a3, a3, a7 # mask lower bits
|
||||
|
||||
l32i a4, a3, 0 # load 2 words
|
||||
l32i a5, a3, 4
|
||||
|
||||
__ssa8 a7
|
||||
__src_b a4, a4, a5 # a4 has the instruction
|
||||
|
||||
/* Analyze the instruction (load or store?). */
|
||||
|
||||
extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
|
||||
|
||||
#if XCHAL_HAVE_NARROW
|
||||
_beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
|
||||
addi a6, a5, -OP0_S32I_N
|
||||
_beqz a6, .Lstore # S32I.N, do a store
|
||||
#endif
|
||||
/* 'store indicator bit' not set, jump */
|
||||
_bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
|
||||
|
||||
/* Store: Jump to table entry to get the value in the source register.*/
|
||||
|
||||
.Lstore:movi a5, .Lstore_table # table
|
||||
extui a6, a4, INSN_T, 4 # get source register
|
||||
addx8 a5, a6, a5
|
||||
jx a5 # jump into table
|
||||
|
||||
/* Invalid instruction, CRITICAL! */
|
||||
.Linvalid_instruction_load:
|
||||
j .Linvalid_instruction
|
||||
|
||||
/* Load: Load memory address. */
|
||||
|
||||
.Lload: movi a3, ~3
|
||||
and a3, a3, a8 # align memory address
|
||||
|
||||
__ssa8 a8
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
addi a3, a3, 8
|
||||
l32e a5, a3, -8
|
||||
l32e a6, a3, -4
|
||||
#else
|
||||
l32i a5, a3, 0
|
||||
l32i a6, a3, 4
|
||||
#endif
|
||||
__src_b a3, a5, a6 # a3 has the data word
|
||||
|
||||
#if XCHAL_HAVE_NARROW
|
||||
addi a7, a7, 2 # increment PC (assume 16-bit insn)
|
||||
|
||||
extui a5, a4, INSN_OP0, 4
|
||||
_beqi a5, OP0_L32I_N, 1f # l32i.n: jump
|
||||
|
||||
addi a7, a7, 1
|
||||
#else
|
||||
addi a7, a7, 3
|
||||
#endif
|
||||
|
||||
extui a5, a4, INSN_OP1, 4
|
||||
_beqi a5, OP1_L32I, 1f # l32i: jump
|
||||
|
||||
extui a3, a3, 0, 16 # extract lower 16 bits
|
||||
_beqi a5, OP1_L16UI, 1f
|
||||
addi a5, a5, -OP1_L16SI
|
||||
_bnez a5, .Linvalid_instruction_load
|
||||
|
||||
/* sign extend value */
|
||||
|
||||
slli a3, a3, 16
|
||||
srai a3, a3, 16
|
||||
|
||||
/* Set target register. */
|
||||
|
||||
1:
|
||||
|
||||
#if XCHAL_HAVE_LOOP
|
||||
rsr a3, LEND # check if we reached LEND
|
||||
bne a7, a3, 1f
|
||||
rsr a3, LCOUNT # and LCOUNT != 0
|
||||
beqz a3, 1f
|
||||
addi a3, a3, -1 # decrement LCOUNT and set
|
||||
rsr a7, LBEG # set PC to LBEGIN
|
||||
wsr a3, LCOUNT
|
||||
#endif
|
||||
|
||||
1: wsr a7, EPC_1 # skip load instruction
|
||||
extui a4, a4, INSN_T, 4 # extract target register
|
||||
movi a5, .Lload_table
|
||||
addx8 a4, a4, a5
|
||||
jx a4 # jump to entry for target register
|
||||
|
||||
.align 8
|
||||
.Lload_table:
|
||||
s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
|
||||
mov a1, a3; _j .Lexit; .align 8 # fishy??
|
||||
s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
|
||||
s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
|
||||
s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
|
||||
s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
|
||||
s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
|
||||
s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
|
||||
s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
|
||||
mov a9, a3 ; _j .Lexit; .align 8
|
||||
mov a10, a3 ; _j .Lexit; .align 8
|
||||
mov a11, a3 ; _j .Lexit; .align 8
|
||||
mov a12, a3 ; _j .Lexit; .align 8
|
||||
mov a13, a3 ; _j .Lexit; .align 8
|
||||
mov a14, a3 ; _j .Lexit; .align 8
|
||||
mov a15, a3 ; _j .Lexit; .align 8
|
||||
|
||||
.Lstore_table:
|
||||
l32i a3, a2, PT_AREG0; _j 1f; .align 8
|
||||
mov a3, a1; _j 1f; .align 8 # fishy??
|
||||
l32i a3, a2, PT_AREG2; _j 1f; .align 8
|
||||
l32i a3, a2, PT_AREG3; _j 1f; .align 8
|
||||
l32i a3, a2, PT_AREG4; _j 1f; .align 8
|
||||
l32i a3, a2, PT_AREG5; _j 1f; .align 8
|
||||
l32i a3, a2, PT_AREG6; _j 1f; .align 8
|
||||
l32i a3, a2, PT_AREG7; _j 1f; .align 8
|
||||
l32i a3, a2, PT_AREG8; _j 1f; .align 8
|
||||
mov a3, a9 ; _j 1f; .align 8
|
||||
mov a3, a10 ; _j 1f; .align 8
|
||||
mov a3, a11 ; _j 1f; .align 8
|
||||
mov a3, a12 ; _j 1f; .align 8
|
||||
mov a3, a13 ; _j 1f; .align 8
|
||||
mov a3, a14 ; _j 1f; .align 8
|
||||
mov a3, a15 ; _j 1f; .align 8
|
||||
|
||||
1: # a7: instruction pointer, a4: instruction, a3: value
|
||||
|
||||
movi a6, 0 # mask: ffffffff:00000000
|
||||
|
||||
#if XCHAL_HAVE_NARROW
|
||||
addi a7, a7, 2 # incr. PC,assume 16-bit instruction
|
||||
|
||||
extui a5, a4, INSN_OP0, 4 # extract OP0
|
||||
addi a5, a5, -OP0_S32I_N
|
||||
_beqz a5, 1f # s32i.n: jump
|
||||
|
||||
addi a7, a7, 1 # increment PC, 32-bit instruction
|
||||
#else
|
||||
addi a7, a7, 3 # increment PC, 32-bit instruction
|
||||
#endif
|
||||
|
||||
extui a5, a4, INSN_OP1, 4 # extract OP1
|
||||
_beqi a5, OP1_S32I, 1f # jump if 32 bit store
|
||||
_bnei a5, OP1_S16I, .Linvalid_instruction_store
|
||||
|
||||
movi a5, -1
|
||||
__extl a3, a3 # get 16-bit value
|
||||
__exth a6, a5 # get 16-bit mask ffffffff:ffff0000
|
||||
|
||||
/* Get memory address */
|
||||
|
||||
1:
|
||||
#if XCHAL_HAVE_LOOP
|
||||
rsr a3, LEND # check if we reached LEND
|
||||
bne a7, a3, 1f
|
||||
rsr a3, LCOUNT # and LCOUNT != 0
|
||||
beqz a3, 1f
|
||||
addi a3, a3, -1 # decrement LCOUNT and set
|
||||
rsr a7, LBEG # set PC to LBEGIN
|
||||
wsr a3, LCOUNT
|
||||
#endif
|
||||
|
||||
1: wsr a7, EPC_1 # skip store instruction
|
||||
movi a4, ~3
|
||||
and a4, a4, a8 # align memory address
|
||||
|
||||
/* Insert value into memory */
|
||||
|
||||
movi a5, -1 # mask: ffffffff:XXXX0000
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
addi a4, a4, 8
|
||||
#endif
|
||||
|
||||
__ssa8r a8
|
||||
__src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
|
||||
__src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
l32e a5, a4, -8
|
||||
#else
|
||||
l32i a5, a4, 0 # load lower address word
|
||||
#endif
|
||||
and a5, a5, a7 # mask
|
||||
__sh a7, a3 # shift value
|
||||
or a5, a5, a7 # or with original value
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
s32e a5, a4, -8
|
||||
l32e a7, a4, -4
|
||||
#else
|
||||
s32i a5, a4, 0 # store
|
||||
l32i a7, a4, 4 # same for upper address word
|
||||
#endif
|
||||
__sl a5, a3
|
||||
and a6, a7, a6
|
||||
or a6, a6, a5
|
||||
#ifdef UNALIGNED_USER_EXCEPTION
|
||||
s32e a6, a4, -4
|
||||
#else
|
||||
s32i a6, a4, 4
|
||||
#endif
|
||||
|
||||
/* Done. restore stack and return */
|
||||
|
||||
.Lexit:
|
||||
movi a4, 0
|
||||
rsr a3, EXCSAVE_1
|
||||
s32i a4, a3, EXC_TABLE_FIXUP
|
||||
|
||||
/* Restore working register */
|
||||
|
||||
l32i a7, a2, PT_AREG7
|
||||
l32i a6, a2, PT_AREG6
|
||||
l32i a5, a2, PT_AREG5
|
||||
l32i a4, a2, PT_AREG4
|
||||
l32i a3, a2, PT_AREG3
|
||||
|
||||
/* restore SAR and return */
|
||||
|
||||
wsr a0, SAR
|
||||
l32i a0, a2, PT_AREG0
|
||||
l32i a2, a2, PT_AREG2
|
||||
rfe
|
||||
|
||||
/* We cannot handle this exception. */
|
||||
|
||||
.extern _kernel_exception
|
||||
.Linvalid_instruction_store:
|
||||
.Linvalid_instruction:
|
||||
|
||||
/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
|
||||
|
||||
l32i a8, a2, PT_AREG8
|
||||
l32i a7, a2, PT_AREG7
|
||||
l32i a6, a2, PT_AREG6
|
||||
l32i a5, a2, PT_AREG5
|
||||
l32i a4, a2, PT_AREG4
|
||||
wsr a0, SAR
|
||||
mov a1, a2
|
||||
|
||||
rsr a0, PS
|
||||
bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
|
||||
|
||||
movi a0, _kernel_exception
|
||||
jx a0
|
||||
|
||||
1: movi a0, _user_exception
|
||||
jx a0
|
||||
|
||||
|
||||
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
|
||||
|
||||
94
arch/xtensa/kernel/asm-offsets.c
Normal file
94
arch/xtensa/kernel/asm-offsets.c
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/asm-offsets.c
|
||||
*
|
||||
* Generates definitions from c-type structures used by assembly sources.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2005 Tensilica Inc.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/thread_info.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
|
||||
#define BLANK() asm volatile("\n->" : : )
|
||||
|
||||
int main(void)
|
||||
{
|
||||
/* struct pt_regs */
|
||||
DEFINE(PT_PC, offsetof (struct pt_regs, pc));
|
||||
DEFINE(PT_PS, offsetof (struct pt_regs, ps));
|
||||
DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
|
||||
DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
|
||||
DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
|
||||
DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
|
||||
DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
|
||||
DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
|
||||
DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
|
||||
DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
|
||||
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
|
||||
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
|
||||
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
|
||||
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
|
||||
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
|
||||
DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
|
||||
DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
|
||||
DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
|
||||
DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
|
||||
DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
|
||||
DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
|
||||
DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
|
||||
DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
|
||||
DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
|
||||
DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
|
||||
DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
|
||||
DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
|
||||
DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
|
||||
DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
|
||||
DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
|
||||
DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
|
||||
DEFINE(PT_SIZE, sizeof(struct pt_regs));
|
||||
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
|
||||
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
|
||||
BLANK();
|
||||
|
||||
/* struct task_struct */
|
||||
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
|
||||
DEFINE(TASK_MM, offsetof (struct task_struct, mm));
|
||||
DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
|
||||
DEFINE(TASK_PID, offsetof (struct task_struct, pid));
|
||||
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
|
||||
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
|
||||
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
|
||||
BLANK();
|
||||
|
||||
/* struct thread_info (offset from start_struct) */
|
||||
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
|
||||
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
|
||||
DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
|
||||
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
|
||||
BLANK();
|
||||
|
||||
/* struct mm_struct */
|
||||
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
|
||||
DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
|
||||
DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
|
||||
BLANK();
|
||||
DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
201
arch/xtensa/kernel/coprocessor.S
Normal file
201
arch/xtensa/kernel/coprocessor.S
Normal file
@@ -0,0 +1,201 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/coprocessor.S
|
||||
*
|
||||
* Xtensa processor configuration-specific table of coprocessor and
|
||||
* other custom register layout information.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2003 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
|
||||
*/
|
||||
|
||||
/*
|
||||
* This module contains a table that describes the layout of the various
|
||||
* custom registers and states associated with each coprocessor, as well
|
||||
* as those not associated with any coprocessor ("extra state").
|
||||
* This table is included with core dumps and is available via the ptrace
|
||||
* interface, allowing the layout of such register/state information to
|
||||
* be modified in the kernel without affecting the debugger. Each
|
||||
* register or state is identified using a 32-bit "libdb target number"
|
||||
* assigned when the Xtensa processor is generated.
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
#if XCHAL_HAVE_CP
|
||||
|
||||
#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
|
||||
|
||||
ENTRY(release_coprocessors)
|
||||
|
||||
entry a1, 16
|
||||
# a2: task
|
||||
movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
|
||||
movi a4, coprocessor_info+CP_LAST # a4: owner-table
|
||||
# a5: tmp
|
||||
movi a6, 0 # a6: 0
|
||||
rsil a7, LOCKLEVEL # a7: PS
|
||||
|
||||
1: /* Check if task is coprocessor owner of coprocessor[i]. */
|
||||
|
||||
l32i a5, a4, COPROCESSOR_INFO_OWNER
|
||||
srli a3, a3, 1
|
||||
beqz a3, 1f
|
||||
addi a4, a4, -8
|
||||
beq a2, a5, 1b
|
||||
|
||||
/* Found an entry: Clear entry CPENABLE bit to disable CP. */
|
||||
|
||||
rsr a5, CPENABLE
|
||||
s32i a6, a4, COPROCESSOR_INFO_OWNER
|
||||
xor a5, a3, a5
|
||||
wsr a5, CPENABLE
|
||||
|
||||
bnez a3, 1b
|
||||
|
||||
1: wsr a7, PS
|
||||
rsync
|
||||
retw
|
||||
|
||||
|
||||
ENTRY(disable_coprocessor)
|
||||
entry sp, 16
|
||||
rsil a7, LOCKLEVEL
|
||||
rsr a3, CPENABLE
|
||||
movi a4, 1
|
||||
ssl a2
|
||||
sll a4, a4
|
||||
and a4, a3, a4
|
||||
xor a3, a3, a4
|
||||
wsr a3, CPENABLE
|
||||
wsr a7, PS
|
||||
rsync
|
||||
retw
|
||||
|
||||
ENTRY(enable_coprocessor)
|
||||
entry sp, 16
|
||||
rsil a7, LOCKLEVEL
|
||||
rsr a3, CPENABLE
|
||||
movi a4, 1
|
||||
ssl a2
|
||||
sll a4, a4
|
||||
or a3, a3, a4
|
||||
wsr a3, CPENABLE
|
||||
wsr a7, PS
|
||||
rsync
|
||||
retw
|
||||
|
||||
#endif
|
||||
|
||||
ENTRY(save_coprocessor_extra)
|
||||
entry sp, 16
|
||||
xchal_extra_store_funcbody
|
||||
retw
|
||||
|
||||
ENTRY(restore_coprocessor_extra)
|
||||
entry sp, 16
|
||||
xchal_extra_load_funcbody
|
||||
retw
|
||||
|
||||
ENTRY(save_coprocessor_registers)
|
||||
entry sp, 16
|
||||
xchal_cpi_store_funcbody
|
||||
retw
|
||||
|
||||
ENTRY(restore_coprocessor_registers)
|
||||
entry sp, 16
|
||||
xchal_cpi_load_funcbody
|
||||
retw
|
||||
|
||||
|
||||
/*
|
||||
* The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
|
||||
* describe the contents of coprocessor & extra save areas in terms of
|
||||
* undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
|
||||
* latter macros here; they expand into a table of the format we want.
|
||||
* The general format is:
|
||||
*
|
||||
* CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
|
||||
* bitmask, rsv2, rsv3)
|
||||
* CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
|
||||
* bitmask, rsv2, rsv3)
|
||||
* CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
|
||||
* numentries, contentsize, regname_base,
|
||||
* regfile_name, rsv2, rsv3)
|
||||
*
|
||||
* For this table, we only care about the <libdbnum>, <offset> and <size>
|
||||
* fields.
|
||||
*/
|
||||
|
||||
/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
|
||||
|
||||
#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
|
||||
bitmask, rsv2, rsv3) \
|
||||
reg_entry libdbnum, offset, size ;
|
||||
#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
|
||||
bitmask, rsv2, rsv3) \
|
||||
reg_entry libdbnum, offset, size ;
|
||||
#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
|
||||
numentries, contentsize, regname_base, \
|
||||
regfile_name, rsv2, rsv3) \
|
||||
reg_entry libdbnum, offset, size ;
|
||||
|
||||
/* A single table entry: */
|
||||
.macro reg_entry libdbnum, offset, size
|
||||
.ifne (__last_offset-(__last_group_offset+\offset))
|
||||
/* padding entry */
|
||||
.word (0xFC000000+__last_offset-(__last_group_offset+\offset))
|
||||
.endif
|
||||
.word \libdbnum /* actual entry */
|
||||
.set __last_offset, __last_group_offset+\offset+\size
|
||||
.endm /* reg_entry */
|
||||
|
||||
|
||||
/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
|
||||
.macro reg_group cpnum, num_entries, align
|
||||
.set __last_group_offset, (__last_offset + \align- 1) & -\align
|
||||
.ifne \num_entries
|
||||
.word 0xFD000000+(\cpnum<<16)+\num_entries
|
||||
.endif
|
||||
.endm /* reg_group */
|
||||
|
||||
/*
|
||||
* Register info tables.
|
||||
*/
|
||||
|
||||
.section .rodata, "a"
|
||||
.globl _xtensa_reginfo_tables
|
||||
.globl _xtensa_reginfo_table_size
|
||||
.align 4
|
||||
_xtensa_reginfo_table_size:
|
||||
.word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
|
||||
|
||||
_xtensa_reginfo_tables:
|
||||
.set __last_offset, 0
|
||||
reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
|
||||
XCHAL_EXTRA_SA_CONTENTS_LIBDB
|
||||
reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
|
||||
XCHAL_CP0_SA_CONTENTS_LIBDB
|
||||
reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
|
||||
XCHAL_CP1_SA_CONTENTS_LIBDB
|
||||
reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
|
||||
XCHAL_CP2_SA_CONTENTS_LIBDB
|
||||
reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
|
||||
XCHAL_CP3_SA_CONTENTS_LIBDB
|
||||
reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
|
||||
XCHAL_CP4_SA_CONTENTS_LIBDB
|
||||
reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
|
||||
XCHAL_CP5_SA_CONTENTS_LIBDB
|
||||
reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
|
||||
XCHAL_CP6_SA_CONTENTS_LIBDB
|
||||
reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
|
||||
XCHAL_CP7_SA_CONTENTS_LIBDB
|
||||
.word 0xFC000000 /* invalid register number,marks end of table*/
|
||||
_xtensa_reginfo_table_end:
|
||||
|
||||
1996
arch/xtensa/kernel/entry.S
Normal file
1996
arch/xtensa/kernel/entry.S
Normal file
File diff suppressed because it is too large
Load Diff
237
arch/xtensa/kernel/head.S
Normal file
237
arch/xtensa/kernel/head.S
Normal file
@@ -0,0 +1,237 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/head.S
|
||||
*
|
||||
* Xtensa Processor startup code.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Kevin Chea
|
||||
*/
|
||||
|
||||
#include <xtensa/cacheasm.h>
|
||||
#include <linux/config.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
/*
|
||||
* This module contains the entry code for kernel images. It performs the
|
||||
* minimal setup needed to call the generic C routines.
|
||||
*
|
||||
* Prerequisites:
|
||||
*
|
||||
* - The kernel image has been loaded to the actual address where it was
|
||||
* compiled to.
|
||||
* - a2 contains either 0 or a pointer to a list of boot parameters.
|
||||
* (see setup.c for more details)
|
||||
*
|
||||
*/
|
||||
|
||||
.macro iterate from, to , cmd
|
||||
.ifeq ((\to - \from) & ~0xfff)
|
||||
\cmd \from
|
||||
iterate "(\from+1)", \to, \cmd
|
||||
.endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* _start
|
||||
*
|
||||
* The bootloader passes a pointer to a list of boot parameters in a2.
|
||||
*/
|
||||
|
||||
/* The first bytes of the kernel image must be an instruction, so we
|
||||
* manually allocate and define the literal constant we need for a jx
|
||||
* instruction.
|
||||
*/
|
||||
|
||||
.section .head.text, "ax"
|
||||
.globl _start
|
||||
_start: _j 2f
|
||||
.align 4
|
||||
1: .word _startup
|
||||
2: l32r a0, 1b
|
||||
jx a0
|
||||
|
||||
.text
|
||||
.align 4
|
||||
_startup:
|
||||
|
||||
/* Disable interrupts and exceptions. */
|
||||
|
||||
movi a0, XCHAL_PS_EXCM_MASK
|
||||
wsr a0, PS
|
||||
|
||||
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
|
||||
|
||||
wsr a2, EXCSAVE_1
|
||||
|
||||
/* Start with a fresh windowbase and windowstart. */
|
||||
|
||||
movi a1, 1
|
||||
movi a0, 0
|
||||
wsr a1, WINDOWSTART
|
||||
wsr a0, WINDOWBASE
|
||||
rsync
|
||||
|
||||
/* Set a0 to 0 for the remaining initialization. */
|
||||
|
||||
movi a0, 0
|
||||
|
||||
/* Clear debugging registers. */
|
||||
|
||||
#if XCHAL_HAVE_DEBUG
|
||||
wsr a0, IBREAKENABLE
|
||||
wsr a0, ICOUNT
|
||||
movi a1, 15
|
||||
wsr a0, ICOUNTLEVEL
|
||||
|
||||
.macro reset_dbreak num
|
||||
wsr a0, DBREAKC + \num
|
||||
.endm
|
||||
|
||||
iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
|
||||
#endif
|
||||
|
||||
/* Clear CCOUNT (not really necessary, but nice) */
|
||||
|
||||
wsr a0, CCOUNT # not really necessary, but nice
|
||||
|
||||
/* Disable zero-loops. */
|
||||
|
||||
#if XCHAL_HAVE_LOOPS
|
||||
wsr a0, LCOUNT
|
||||
#endif
|
||||
|
||||
/* Disable all timers. */
|
||||
|
||||
.macro reset_timer num
|
||||
wsr a0, CCOMPARE_0 + \num
|
||||
.endm
|
||||
iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
|
||||
|
||||
/* Interrupt initialization. */
|
||||
|
||||
movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
|
||||
wsr a0, INTENABLE
|
||||
wsr a2, INTCLEAR
|
||||
|
||||
/* Disable coprocessors. */
|
||||
|
||||
#if XCHAL_CP_NUM > 0
|
||||
wsr a0, CPENABLE
|
||||
#endif
|
||||
|
||||
/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
|
||||
*
|
||||
* Note: PS.EXCM must be cleared before using any loop
|
||||
* instructions; otherwise, they are silently disabled, and
|
||||
* at most one iteration of the loop is executed.
|
||||
*/
|
||||
|
||||
movi a1, 1
|
||||
wsr a1, PS
|
||||
rsync
|
||||
|
||||
/* Initialize the caches.
|
||||
* Does not include flushing writeback d-cache.
|
||||
* a6, a7 are just working registers (clobbered).
|
||||
*/
|
||||
|
||||
icache_reset a2, a3
|
||||
dcache_reset a2, a3
|
||||
|
||||
/* Unpack data sections
|
||||
*
|
||||
* The linker script used to build the Linux kernel image
|
||||
* creates a table located at __boot_reloc_table_start
|
||||
* that contans the information what data needs to be unpacked.
|
||||
*
|
||||
* Uses a2-a7.
|
||||
*/
|
||||
|
||||
movi a2, __boot_reloc_table_start
|
||||
movi a3, __boot_reloc_table_end
|
||||
|
||||
1: beq a2, a3, 3f # no more entries?
|
||||
l32i a4, a2, 0 # start destination (in RAM)
|
||||
l32i a5, a2, 4 # end desination (in RAM)
|
||||
l32i a6, a2, 8 # start source (in ROM)
|
||||
addi a2, a2, 12 # next entry
|
||||
beq a4, a5, 1b # skip, empty entry
|
||||
beq a4, a6, 1b # skip, source and dest. are the same
|
||||
|
||||
2: l32i a7, a6, 0 # load word
|
||||
addi a6, a6, 4
|
||||
s32i a7, a4, 0 # store word
|
||||
addi a4, a4, 4
|
||||
bltu a4, a5, 2b
|
||||
j 1b
|
||||
|
||||
3:
|
||||
/* All code and initialized data segments have been copied.
|
||||
* Now clear the BSS segment.
|
||||
*/
|
||||
|
||||
movi a2, _bss_start # start of BSS
|
||||
movi a3, _bss_end # end of BSS
|
||||
|
||||
1: addi a2, a2, 4
|
||||
s32i a0, a2, 0
|
||||
blt a2, a3, 1b
|
||||
|
||||
#if XCHAL_DCACHE_IS_WRITEBACK
|
||||
|
||||
/* After unpacking, flush the writeback cache to memory so the
|
||||
* instructions/data are available.
|
||||
*/
|
||||
|
||||
dcache_writeback_all a2, a3
|
||||
#endif
|
||||
|
||||
/* Setup stack and enable window exceptions (keep irqs disabled) */
|
||||
|
||||
movi a1, init_thread_union
|
||||
addi a1, a1, KERNEL_STACK_SIZE
|
||||
|
||||
movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
|
||||
wsr a2, PS # (enable reg-windows; progmode stack)
|
||||
rsync
|
||||
|
||||
/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
|
||||
|
||||
movi a2, debug_exception
|
||||
wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
|
||||
|
||||
/* Set up EXCSAVE[1] to point to the exc_table. */
|
||||
|
||||
movi a6, exc_table
|
||||
xsr a6, EXCSAVE_1
|
||||
|
||||
/* init_arch kick-starts the linux kernel */
|
||||
|
||||
movi a4, init_arch
|
||||
callx4 a4
|
||||
|
||||
movi a4, start_kernel
|
||||
callx4 a4
|
||||
|
||||
should_never_return:
|
||||
j should_never_return
|
||||
|
||||
/* Define some common data structures here. We define them
|
||||
* here in this assembly file due to their unusual alignment
|
||||
* requirements.
|
||||
*/
|
||||
|
||||
.comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
|
||||
.comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
|
||||
.comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
|
||||
.comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
|
||||
|
||||
192
arch/xtensa/kernel/irq.c
Normal file
192
arch/xtensa/kernel/irq.c
Normal file
@@ -0,0 +1,192 @@
|
||||
/*
|
||||
* linux/arch/xtensa/kernel/irq.c
|
||||
*
|
||||
* Xtensa built-in interrupt controller and some generic functions copied
|
||||
* from i386.
|
||||
*
|
||||
* Copyright (C) 2002 - 2005 Tensilica, Inc.
|
||||
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
|
||||
*
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Kevin Chea
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/platform.h>
|
||||
|
||||
static void enable_xtensa_irq(unsigned int irq);
|
||||
static void disable_xtensa_irq(unsigned int irq);
|
||||
static void mask_and_ack_xtensa(unsigned int irq);
|
||||
static void end_xtensa_irq(unsigned int irq);
|
||||
|
||||
static unsigned int cached_irq_mask;
|
||||
|
||||
atomic_t irq_err_count;
|
||||
|
||||
/*
|
||||
* 'what should we do if we get a hw irq event on an illegal vector'.
|
||||
* each architecture has to answer this themselves.
|
||||
*/
|
||||
void ack_bad_irq(unsigned int irq)
|
||||
{
|
||||
printk("unexpected IRQ trap at vector %02x\n", irq);
|
||||
}
|
||||
|
||||
/*
|
||||
* do_IRQ handles all normal device IRQ's (the special
|
||||
* SMP cross-CPU interrupts have their own specific
|
||||
* handlers).
|
||||
*/
|
||||
|
||||
unsigned int do_IRQ(int irq, struct pt_regs *regs)
|
||||
{
|
||||
irq_enter();
|
||||
|
||||
#ifdef CONFIG_DEBUG_STACKOVERFLOW
|
||||
/* Debugging check for stack overflow: is there less than 1KB free? */
|
||||
{
|
||||
unsigned long sp;
|
||||
|
||||
__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
|
||||
sp &= THREAD_SIZE - 1;
|
||||
|
||||
if (unlikely(sp < (sizeof(thread_info) + 1024)))
|
||||
printk("Stack overflow in do_IRQ: %ld\n",
|
||||
sp - sizeof(struct thread_info));
|
||||
}
|
||||
#endif
|
||||
|
||||
__do_IRQ(irq, regs);
|
||||
|
||||
irq_exit();
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Generic, controller-independent functions:
|
||||
*/
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
{
|
||||
int i = *(loff_t *) v, j;
|
||||
struct irqaction * action;
|
||||
unsigned long flags;
|
||||
|
||||
if (i == 0) {
|
||||
seq_printf(p, " ");
|
||||
for (j=0; j<NR_CPUS; j++)
|
||||
if (cpu_online(j))
|
||||
seq_printf(p, "CPU%d ",j);
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
|
||||
if (i < NR_IRQS) {
|
||||
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
||||
action = irq_desc[i].action;
|
||||
if (!action)
|
||||
goto skip;
|
||||
seq_printf(p, "%3d: ",i);
|
||||
#ifndef CONFIG_SMP
|
||||
seq_printf(p, "%10u ", kstat_irqs(i));
|
||||
#else
|
||||
for (j = 0; j < NR_CPUS; j++)
|
||||
if (cpu_online(j))
|
||||
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
|
||||
#endif
|
||||
seq_printf(p, " %14s", irq_desc[i].handler->typename);
|
||||
seq_printf(p, " %s", action->name);
|
||||
|
||||
for (action=action->next; action; action = action->next)
|
||||
seq_printf(p, ", %s", action->name);
|
||||
|
||||
seq_putc(p, '\n');
|
||||
skip:
|
||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||
} else if (i == NR_IRQS) {
|
||||
seq_printf(p, "NMI: ");
|
||||
for (j = 0; j < NR_CPUS; j++)
|
||||
if (cpu_online(j))
|
||||
seq_printf(p, "%10u ", nmi_count(j));
|
||||
seq_putc(p, '\n');
|
||||
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
/* shutdown is same as "disable" */
|
||||
#define shutdown_xtensa_irq disable_xtensa_irq
|
||||
|
||||
static unsigned int startup_xtensa_irq(unsigned int irq)
|
||||
{
|
||||
enable_xtensa_irq(irq);
|
||||
return 0; /* never anything pending */
|
||||
}
|
||||
|
||||
static struct hw_interrupt_type xtensa_irq_type = {
|
||||
"Xtensa-IRQ",
|
||||
startup_xtensa_irq,
|
||||
shutdown_xtensa_irq,
|
||||
enable_xtensa_irq,
|
||||
disable_xtensa_irq,
|
||||
mask_and_ack_xtensa,
|
||||
end_xtensa_irq
|
||||
};
|
||||
|
||||
static inline void mask_irq(unsigned int irq)
|
||||
{
|
||||
cached_irq_mask &= ~(1 << irq);
|
||||
set_sr (cached_irq_mask, INTENABLE);
|
||||
}
|
||||
|
||||
static inline void unmask_irq(unsigned int irq)
|
||||
{
|
||||
cached_irq_mask |= 1 << irq;
|
||||
set_sr (cached_irq_mask, INTENABLE);
|
||||
}
|
||||
|
||||
static void disable_xtensa_irq(unsigned int irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_save_flags(flags);
|
||||
mask_irq(irq);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void enable_xtensa_irq(unsigned int irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_save_flags(flags);
|
||||
unmask_irq(irq);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void mask_and_ack_xtensa(unsigned int irq)
|
||||
{
|
||||
disable_xtensa_irq(irq);
|
||||
}
|
||||
|
||||
static void end_xtensa_irq(unsigned int irq)
|
||||
{
|
||||
enable_xtensa_irq(irq);
|
||||
}
|
||||
|
||||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i=0; i < XTENSA_NR_IRQS; i++)
|
||||
irq_desc[i].handler = &xtensa_irq_type;
|
||||
|
||||
cached_irq_mask = 0;
|
||||
|
||||
platform_init_irq();
|
||||
}
|
||||
78
arch/xtensa/kernel/module.c
Normal file
78
arch/xtensa/kernel/module.c
Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/platform.c
|
||||
*
|
||||
* Module support.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleloader.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/cache.h>
|
||||
|
||||
LIST_HEAD(module_buf_list);
|
||||
|
||||
void *module_alloc(unsigned long size)
|
||||
{
|
||||
panic("module_alloc not implemented");
|
||||
}
|
||||
|
||||
void module_free(struct module *mod, void *module_region)
|
||||
{
|
||||
panic("module_free not implemented");
|
||||
}
|
||||
|
||||
int module_frob_arch_sections(Elf32_Ehdr *hdr,
|
||||
Elf32_Shdr *sechdrs,
|
||||
char *secstrings,
|
||||
struct module *me)
|
||||
{
|
||||
panic("module_frob_arch_sections not implemented");
|
||||
}
|
||||
|
||||
int apply_relocate(Elf32_Shdr *sechdrs,
|
||||
const char *strtab,
|
||||
unsigned int symindex,
|
||||
unsigned int relsec,
|
||||
struct module *module)
|
||||
{
|
||||
panic ("apply_relocate not implemented");
|
||||
}
|
||||
|
||||
int apply_relocate_add(Elf32_Shdr *sechdrs,
|
||||
const char *strtab,
|
||||
unsigned int symindex,
|
||||
unsigned int relsec,
|
||||
struct module *module)
|
||||
{
|
||||
panic("apply_relocate_add not implemented");
|
||||
}
|
||||
|
||||
int module_finalize(const Elf_Ehdr *hdr,
|
||||
const Elf_Shdr *sechdrs,
|
||||
struct module *me)
|
||||
{
|
||||
panic ("module_finalize not implemented");
|
||||
}
|
||||
|
||||
void module_arch_cleanup(struct module *mod)
|
||||
{
|
||||
panic("module_arch_cleanup not implemented");
|
||||
}
|
||||
|
||||
struct bug_entry *module_find_bug(unsigned long bugaddr)
|
||||
{
|
||||
panic("module_find_bug not implemented");
|
||||
}
|
||||
73
arch/xtensa/kernel/pci-dma.c
Normal file
73
arch/xtensa/kernel/pci-dma.c
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* arch/xtensa/pci-dma.c
|
||||
*
|
||||
* DMA coherent memory allocation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* Copyright (C) 2002 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Based on version for i386.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/pci.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
/*
|
||||
* Note: We assume that the full memory space is always mapped to 'kseg'
|
||||
* Otherwise we have to use page attributes (not implemented).
|
||||
*/
|
||||
|
||||
void *
|
||||
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
/* ignore region speicifiers */
|
||||
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
|
||||
|
||||
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
|
||||
gfp |= GFP_DMA;
|
||||
ret = (void *)__get_free_pages(gfp, get_order(size));
|
||||
|
||||
if (ret != NULL) {
|
||||
memset(ret, 0, size);
|
||||
*handle = virt_to_bus(ret);
|
||||
}
|
||||
return (void*) BYPASS_ADDR((unsigned long)ret);
|
||||
}
|
||||
|
||||
void dma_free_coherent(struct device *hwdev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle)
|
||||
{
|
||||
free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
|
||||
}
|
||||
|
||||
|
||||
void consistent_sync(void *vaddr, size_t size, int direction)
|
||||
{
|
||||
switch (direction) {
|
||||
case PCI_DMA_NONE:
|
||||
BUG();
|
||||
case PCI_DMA_FROMDEVICE: /* invalidate only */
|
||||
__invalidate_dcache_range((unsigned long)vaddr,
|
||||
(unsigned long)size);
|
||||
break;
|
||||
|
||||
case PCI_DMA_TODEVICE: /* writeback only */
|
||||
case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
|
||||
__flush_invalidate_dcache_range((unsigned long)vaddr,
|
||||
(unsigned long)size);
|
||||
break;
|
||||
}
|
||||
}
|
||||
563
arch/xtensa/kernel/pci.c
Normal file
563
arch/xtensa/kernel/pci.c
Normal file
File diff suppressed because it is too large
Load Diff
49
arch/xtensa/kernel/platform.c
Normal file
49
arch/xtensa/kernel/platform.c
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/platform.c
|
||||
*
|
||||
* Default platform functions.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2005 Tensilica Inc.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/time.h>
|
||||
#include <asm/platform.h>
|
||||
#include <asm/timex.h>
|
||||
|
||||
#define _F(r,f,a,b) \
|
||||
r __platform_##f a b; \
|
||||
r platform_##f a __attribute__((weak, alias("__platform_"#f)))
|
||||
|
||||
/*
|
||||
* Default functions that are used if no platform specific function is defined.
|
||||
* (Please, refer to include/asm-xtensa/platform.h for more information)
|
||||
*/
|
||||
|
||||
_F(void, setup, (char** cmd), { });
|
||||
_F(void, init_irq, (void), { });
|
||||
_F(void, restart, (void), { while(1); });
|
||||
_F(void, halt, (void), { while(1); });
|
||||
_F(void, power_off, (void), { while(1); });
|
||||
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
|
||||
_F(void, heartbeat, (void), { });
|
||||
_F(int, pcibios_fixup, (void), { return 0; });
|
||||
_F(int, get_rtc_time, (time_t* t), { return 0; });
|
||||
_F(int, set_rtc_time, (time_t t), { return 0; });
|
||||
|
||||
#if CONFIG_XTENSA_CALIBRATE_CCOUNT
|
||||
_F(void, calibrate_ccount, (void),
|
||||
{
|
||||
printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
|
||||
ccount_per_jiffy = 100 * (1000000UL/HZ);
|
||||
});
|
||||
#endif
|
||||
|
||||
482
arch/xtensa/kernel/process.c
Normal file
482
arch/xtensa/kernel/process.c
Normal file
@@ -0,0 +1,482 @@
|
||||
// TODO verify coprocessor handling
|
||||
/*
|
||||
* arch/xtensa/kernel/process.c
|
||||
*
|
||||
* Xtensa Processor version.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
||||
* Kevin Chea
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/init_task.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mqueue.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/platform.h>
|
||||
#include <asm/mmu.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/offsets.h>
|
||||
#include <asm/coprocessor.h>
|
||||
|
||||
extern void ret_from_fork(void);
|
||||
|
||||
static struct fs_struct init_fs = INIT_FS;
|
||||
static struct files_struct init_files = INIT_FILES;
|
||||
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
|
||||
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
|
||||
struct mm_struct init_mm = INIT_MM(init_mm);
|
||||
EXPORT_SYMBOL(init_mm);
|
||||
|
||||
union thread_union init_thread_union
|
||||
__attribute__((__section__(".data.init_task"))) =
|
||||
{ INIT_THREAD_INFO(init_task) };
|
||||
|
||||
struct task_struct init_task = INIT_TASK(init_task);
|
||||
EXPORT_SYMBOL(init_task);
|
||||
|
||||
struct task_struct *current_set[NR_CPUS] = {&init_task, };
|
||||
|
||||
|
||||
#if XCHAL_CP_NUM > 0
|
||||
|
||||
/*
|
||||
* Coprocessor ownership.
|
||||
*/
|
||||
|
||||
coprocessor_info_t coprocessor_info[] = {
|
||||
{ 0, XTENSA_CPE_CP0_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP1_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP2_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP3_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP4_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP5_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP6_OFFSET },
|
||||
{ 0, XTENSA_CPE_CP7_OFFSET },
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Powermanagement idle function, if any is provided by the platform.
|
||||
*/
|
||||
|
||||
void cpu_idle(void)
|
||||
{
|
||||
local_irq_enable();
|
||||
|
||||
/* endless idle loop with no priority at all */
|
||||
while (1) {
|
||||
while (!need_resched())
|
||||
platform_idle();
|
||||
preempt_enable();
|
||||
schedule();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Free current thread data structures etc..
|
||||
*/
|
||||
|
||||
void exit_thread(void)
|
||||
{
|
||||
release_coprocessors(current); /* Empty macro if no CPs are defined */
|
||||
}
|
||||
|
||||
void flush_thread(void)
|
||||
{
|
||||
release_coprocessors(current); /* Empty macro if no CPs are defined */
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy thread.
|
||||
*
|
||||
* The stack layout for the new thread looks like this:
|
||||
*
|
||||
* +------------------------+ <- sp in childregs (= tos)
|
||||
* | childregs |
|
||||
* +------------------------+ <- thread.sp = sp in dummy-frame
|
||||
* | dummy-frame | (saved in dummy-frame spill-area)
|
||||
* +------------------------+
|
||||
*
|
||||
* We create a dummy frame to return to ret_from_fork:
|
||||
* a0 points to ret_from_fork (simulating a call4)
|
||||
* sp points to itself (thread.sp)
|
||||
* a2, a3 are unused.
|
||||
*
|
||||
* Note: This is a pristine frame, so we don't need any spill region on top of
|
||||
* childregs.
|
||||
*/
|
||||
|
||||
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long unused,
|
||||
struct task_struct * p, struct pt_regs * regs)
|
||||
{
|
||||
struct pt_regs *childregs;
|
||||
unsigned long tos;
|
||||
int user_mode = user_mode(regs);
|
||||
|
||||
/* Set up new TSS. */
|
||||
tos = (unsigned long)p->thread_info + THREAD_SIZE;
|
||||
if (user_mode)
|
||||
childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
|
||||
else
|
||||
childregs = (struct pt_regs*)tos - 1;
|
||||
|
||||
*childregs = *regs;
|
||||
|
||||
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
|
||||
*((int*)childregs - 3) = (unsigned long)childregs;
|
||||
*((int*)childregs - 4) = 0;
|
||||
|
||||
childregs->areg[1] = tos;
|
||||
childregs->areg[2] = 0;
|
||||
p->set_child_tid = p->clear_child_tid = NULL;
|
||||
p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
|
||||
p->thread.sp = (unsigned long)childregs;
|
||||
if (user_mode(regs)) {
|
||||
|
||||
int len = childregs->wmask & ~0xf;
|
||||
childregs->areg[1] = usp;
|
||||
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
|
||||
®s->areg[XCHAL_NUM_AREGS - len/4], len);
|
||||
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
childregs->areg[2] = childregs->areg[6];
|
||||
|
||||
} else {
|
||||
/* In kernel space, we start a new thread with a new stack. */
|
||||
childregs->wmask = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Create a kernel thread
|
||||
*/
|
||||
|
||||
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
|
||||
{
|
||||
long retval;
|
||||
__asm__ __volatile__
|
||||
("mov a5, %4\n\t" /* preserve fn in a5 */
|
||||
"mov a6, %3\n\t" /* preserve and setup arg in a6 */
|
||||
"movi a2, %1\n\t" /* load __NR_clone for syscall*/
|
||||
"mov a3, sp\n\t" /* sp check and sys_clone */
|
||||
"mov a4, %5\n\t" /* load flags for syscall */
|
||||
"syscall\n\t"
|
||||
"beq a3, sp, 1f\n\t" /* branch if parent */
|
||||
"callx4 a5\n\t" /* call fn */
|
||||
"movi a2, %2\n\t" /* load __NR_exit for syscall */
|
||||
"mov a3, a6\n\t" /* load fn return value */
|
||||
"syscall\n"
|
||||
"1:\n\t"
|
||||
"mov %0, a2\n\t" /* parent returns zero */
|
||||
:"=r" (retval)
|
||||
:"i" (__NR_clone), "i" (__NR_exit),
|
||||
"r" (arg), "r" (fn),
|
||||
"r" (flags | CLONE_VM)
|
||||
: "a2", "a3", "a4", "a5", "a6" );
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* These bracket the sleeping functions..
|
||||
*/
|
||||
|
||||
unsigned long get_wchan(struct task_struct *p)
|
||||
{
|
||||
unsigned long sp, pc;
|
||||
unsigned long stack_page = (unsigned long) p->thread_info;
|
||||
int count = 0;
|
||||
|
||||
if (!p || p == current || p->state == TASK_RUNNING)
|
||||
return 0;
|
||||
|
||||
sp = p->thread.sp;
|
||||
pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
|
||||
|
||||
do {
|
||||
if (sp < stack_page + sizeof(struct task_struct) ||
|
||||
sp >= (stack_page + THREAD_SIZE) ||
|
||||
pc == 0)
|
||||
return 0;
|
||||
if (!in_sched_functions(pc))
|
||||
return pc;
|
||||
|
||||
/* Stack layout: sp-4: ra, sp-3: sp' */
|
||||
|
||||
pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
|
||||
sp = *(unsigned long *)sp - 3;
|
||||
} while (count++ < 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* do_copy_regs() gathers information from 'struct pt_regs' and
|
||||
* 'current->thread.areg[]' to fill in the xtensa_gregset_t
|
||||
* structure.
|
||||
*
|
||||
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats
|
||||
* of processor registers. Besides different ordering,
|
||||
* xtensa_gregset_t contains non-live register information that
|
||||
* 'struct pt_regs' does not. Exception handling (primarily) uses
|
||||
* 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
|
||||
*
|
||||
*/
|
||||
|
||||
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
int i, n, wb_offset;
|
||||
|
||||
elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
|
||||
elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;
|
||||
|
||||
__asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
|
||||
elfregs->cpux = i;
|
||||
__asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
|
||||
elfregs->cpuy = i;
|
||||
|
||||
/* Note: PS.EXCM is not set while user task is running; its
|
||||
* being set in regs->ps is for exception handling convenience.
|
||||
*/
|
||||
|
||||
elfregs->pc = regs->pc;
|
||||
elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
|
||||
elfregs->exccause = regs->exccause;
|
||||
elfregs->excvaddr = regs->excvaddr;
|
||||
elfregs->windowbase = regs->windowbase;
|
||||
elfregs->windowstart = regs->windowstart;
|
||||
elfregs->lbeg = regs->lbeg;
|
||||
elfregs->lend = regs->lend;
|
||||
elfregs->lcount = regs->lcount;
|
||||
elfregs->sar = regs->sar;
|
||||
elfregs->syscall = regs->syscall;
|
||||
|
||||
/* Copy register file.
|
||||
* The layout looks like this:
|
||||
*
|
||||
* | a0 ... a15 | Z ... Z | arX ... arY |
|
||||
* current window unused saved frames
|
||||
*/
|
||||
|
||||
memset (elfregs->ar, 0, sizeof(elfregs->ar));
|
||||
|
||||
wb_offset = regs->windowbase * 4;
|
||||
n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
|
||||
|
||||
for (i = 0; i < n; i++)
|
||||
elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
|
||||
|
||||
n = (regs->wmask >> 4) * 4;
|
||||
|
||||
for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
|
||||
elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
|
||||
}
|
||||
|
||||
void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
|
||||
{
|
||||
do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
|
||||
}
|
||||
|
||||
|
||||
/* The inverse of do_copy_regs(). No error or sanity checking. */
|
||||
|
||||
void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
int i, n, wb_offset;
|
||||
|
||||
/* Note: PS.EXCM is not set while user task is running; it
|
||||
* needs to be set in regs->ps is for exception handling convenience.
|
||||
*/
|
||||
|
||||
regs->pc = elfregs->pc;
|
||||
regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
|
||||
regs->exccause = elfregs->exccause;
|
||||
regs->excvaddr = elfregs->excvaddr;
|
||||
regs->windowbase = elfregs->windowbase;
|
||||
regs->windowstart = elfregs->windowstart;
|
||||
regs->lbeg = elfregs->lbeg;
|
||||
regs->lend = elfregs->lend;
|
||||
regs->lcount = elfregs->lcount;
|
||||
regs->sar = elfregs->sar;
|
||||
regs->syscall = elfregs->syscall;
|
||||
|
||||
/* Clear everything. */
|
||||
|
||||
memset (regs->areg, 0, sizeof(regs->areg));
|
||||
|
||||
/* Copy regs from live window frame. */
|
||||
|
||||
wb_offset = regs->windowbase * 4;
|
||||
n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
|
||||
|
||||
for (i = 0; i < n; i++)
|
||||
regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
|
||||
|
||||
n = (regs->wmask >> 4) * 4;
|
||||
|
||||
for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
|
||||
regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
|
||||
}
|
||||
|
||||
/*
|
||||
* do_save_fpregs() gathers information from 'struct pt_regs' and
|
||||
* 'current->thread' to fill in the elf_fpregset_t structure.
|
||||
*
|
||||
* Core files and ptrace use elf_fpregset_t.
|
||||
*/
|
||||
|
||||
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
#if XCHAL_HAVE_CP
|
||||
|
||||
extern unsigned char _xtensa_reginfo_tables[];
|
||||
extern unsigned _xtensa_reginfo_table_size;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
/* Before dumping coprocessor state from memory,
|
||||
* ensure any live coprocessor contents for this
|
||||
* task are first saved to memory:
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
|
||||
for (i = 0; i < XCHAL_CP_MAX; i++) {
|
||||
if (tsk == coprocessor_info[i].owner) {
|
||||
enable_coprocessor(i);
|
||||
save_coprocessor_registers(
|
||||
tsk->thread.cp_save+coprocessor_info[i].offset,i);
|
||||
disable_coprocessor(i);
|
||||
}
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* Now dump coprocessor & extra state: */
|
||||
memcpy((unsigned char*)fpregs,
|
||||
_xtensa_reginfo_tables, _xtensa_reginfo_table_size);
|
||||
memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
|
||||
tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* The inverse of do_save_fpregs().
|
||||
* Copies coprocessor and extra state from fpregs into regs and tsk->thread.
|
||||
* Returns 0 on success, non-zero if layout doesn't match.
|
||||
*/
|
||||
|
||||
int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
#if XCHAL_HAVE_CP
|
||||
|
||||
extern unsigned char _xtensa_reginfo_tables[];
|
||||
extern unsigned _xtensa_reginfo_table_size;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
/* Make sure save area layouts match.
|
||||
* FIXME: in the future we could allow restoring from
|
||||
* a different layout of the same registers, by comparing
|
||||
* fpregs' table with _xtensa_reginfo_tables and matching
|
||||
* entries and copying registers one at a time.
|
||||
* Not too sure yet whether that's very useful.
|
||||
*/
|
||||
|
||||
if( memcmp((unsigned char*)fpregs,
|
||||
_xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Before restoring coprocessor state from memory,
|
||||
* ensure any live coprocessor contents for this
|
||||
* task are first invalidated.
|
||||
*/
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
for (i = 0; i < XCHAL_CP_MAX; i++) {
|
||||
if (tsk == coprocessor_info[i].owner) {
|
||||
enable_coprocessor(i);
|
||||
save_coprocessor_registers(
|
||||
tsk->thread.cp_save+coprocessor_info[i].offset,i);
|
||||
coprocessor_info[i].owner = 0;
|
||||
disable_coprocessor(i);
|
||||
}
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* Now restore coprocessor & extra state: */
|
||||
|
||||
memcpy(tsk->thread.cp_save,
|
||||
(unsigned char*)fpregs + _xtensa_reginfo_table_size,
|
||||
XTENSA_CP_EXTRA_SIZE);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* Fill in the CP structure for a core dump for a particular task.
|
||||
*/
|
||||
|
||||
int
|
||||
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
|
||||
{
|
||||
/* see asm/coprocessor.h for this magic number 16 */
|
||||
#if TOTAL_CPEXTRA_SIZE > 16
|
||||
do_save_fpregs (r, regs, task);
|
||||
|
||||
/* For now, bit 16 means some extra state may be present: */
|
||||
// FIXME!! need to track to return more accurate mask
|
||||
return 0x10000 | XCHAL_CP_MASK;
|
||||
#else
|
||||
return 0; /* no coprocessors active on this processor */
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the CP structure for a core dump.
|
||||
* This includes any FPU coprocessor.
|
||||
* Here, we dump all coprocessors, and other ("extra") custom state.
|
||||
*
|
||||
* This function is called by elf_core_dump() in fs/binfmt_elf.c
|
||||
* (in which case 'regs' comes from calls to do_coredump, see signals.c).
|
||||
*/
|
||||
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
|
||||
{
|
||||
return dump_task_fpu(regs, current, r);
|
||||
}
|
||||
407
arch/xtensa/kernel/ptrace.c
Normal file
407
arch/xtensa/kernel/ptrace.c
Normal file
@@ -0,0 +1,407 @@
|
||||
// TODO some minor issues
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Scott Foehner<sfoehner@yahoo.com>,
|
||||
* Kevin Chea
|
||||
* Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/security.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/elf.h>
|
||||
|
||||
#define TEST_KERNEL // verify kernel operations FIXME: remove
|
||||
|
||||
|
||||
/*
|
||||
* Called by kernel/ptrace.c when detaching..
|
||||
*
|
||||
* Make sure single step bits etc are not set.
|
||||
*/
|
||||
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
{
|
||||
/* Nothing to do.. */
|
||||
}
|
||||
|
||||
int sys_ptrace(long request, long pid, long addr, long data)
|
||||
{
|
||||
struct task_struct *child;
|
||||
int ret = -EPERM;
|
||||
|
||||
lock_kernel();
|
||||
|
||||
#if 0
|
||||
if ((int)request != 1)
|
||||
printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
|
||||
(int) request, (int) pid, (unsigned long) addr,
|
||||
(unsigned long) data);
|
||||
#endif
|
||||
|
||||
if (request == PTRACE_TRACEME) {
|
||||
|
||||
/* Are we already being traced? */
|
||||
|
||||
if (current->ptrace & PT_PTRACED)
|
||||
goto out;
|
||||
|
||||
if ((ret = security_ptrace(current->parent, current)))
|
||||
goto out;
|
||||
|
||||
/* Set the ptrace bit in the process flags. */
|
||||
|
||||
current->ptrace |= PT_PTRACED;
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = -ESRCH;
|
||||
read_lock(&tasklist_lock);
|
||||
child = find_task_by_pid(pid);
|
||||
if (child)
|
||||
get_task_struct(child);
|
||||
read_unlock(&tasklist_lock);
|
||||
if (!child)
|
||||
goto out;
|
||||
|
||||
ret = -EPERM;
|
||||
if (pid == 1) /* you may not mess with init */
|
||||
goto out;
|
||||
|
||||
if (request == PTRACE_ATTACH) {
|
||||
ret = ptrace_attach(child);
|
||||
goto out_tsk;
|
||||
}
|
||||
|
||||
if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
|
||||
goto out_tsk;
|
||||
|
||||
switch (request) {
|
||||
case PTRACE_PEEKTEXT: /* read word at location addr. */
|
||||
case PTRACE_PEEKDATA:
|
||||
{
|
||||
unsigned long tmp;
|
||||
int copied;
|
||||
|
||||
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
|
||||
ret = -EIO;
|
||||
if (copied != sizeof(tmp))
|
||||
break;
|
||||
ret = put_user(tmp,(unsigned long *) data);
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Read the word at location addr in the USER area. */
|
||||
|
||||
case PTRACE_PEEKUSR:
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
unsigned long tmp;
|
||||
|
||||
regs = xtensa_pt_regs(child);
|
||||
tmp = 0; /* Default return value. */
|
||||
|
||||
switch(addr) {
|
||||
|
||||
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
||||
{
|
||||
int ar = addr - REG_AR_BASE - regs->windowbase * 4;
|
||||
ar &= (XCHAL_NUM_AREGS - 1);
|
||||
if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
|
||||
tmp = regs->areg[ar];
|
||||
else
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
case REG_A_BASE ... REG_A_BASE + 15:
|
||||
tmp = regs->areg[addr - REG_A_BASE];
|
||||
break;
|
||||
case REG_PC:
|
||||
tmp = regs->pc;
|
||||
break;
|
||||
case REG_PS:
|
||||
/* Note: PS.EXCM is not set while user task is running;
|
||||
* its being set in regs is for exception handling
|
||||
* convenience. */
|
||||
tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
|
||||
break;
|
||||
case REG_WB:
|
||||
tmp = regs->windowbase;
|
||||
break;
|
||||
case REG_WS:
|
||||
tmp = regs->windowstart;
|
||||
break;
|
||||
case REG_LBEG:
|
||||
tmp = regs->lbeg;
|
||||
break;
|
||||
case REG_LEND:
|
||||
tmp = regs->lend;
|
||||
break;
|
||||
case REG_LCOUNT:
|
||||
tmp = regs->lcount;
|
||||
break;
|
||||
case REG_SAR:
|
||||
tmp = regs->sar;
|
||||
break;
|
||||
case REG_DEPC:
|
||||
tmp = regs->depc;
|
||||
break;
|
||||
case REG_EXCCAUSE:
|
||||
tmp = regs->exccause;
|
||||
break;
|
||||
case REG_EXCVADDR:
|
||||
tmp = regs->excvaddr;
|
||||
break;
|
||||
case SYSCALL_NR:
|
||||
tmp = regs->syscall;
|
||||
break;
|
||||
default:
|
||||
tmp = 0;
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
ret = put_user(tmp, (unsigned long *) data);
|
||||
goto out;
|
||||
}
|
||||
|
||||
case PTRACE_POKETEXT: /* write the word at location addr. */
|
||||
case PTRACE_POKEDATA:
|
||||
if (access_process_vm(child, addr, &data, sizeof(data), 1)
|
||||
== sizeof(data))
|
||||
break;
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
|
||||
case PTRACE_POKEUSR:
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
regs = xtensa_pt_regs(child);
|
||||
|
||||
switch (addr) {
|
||||
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
||||
{
|
||||
int ar = addr - REG_AR_BASE - regs->windowbase * 4;
|
||||
if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
|
||||
regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
|
||||
else
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
case REG_A_BASE ... REG_A_BASE + 15:
|
||||
regs->areg[addr - REG_A_BASE] = data;
|
||||
break;
|
||||
case REG_PC:
|
||||
regs->pc = data;
|
||||
break;
|
||||
case SYSCALL_NR:
|
||||
regs->syscall = data;
|
||||
break;
|
||||
#ifdef TEST_KERNEL
|
||||
case REG_WB:
|
||||
regs->windowbase = data;
|
||||
break;
|
||||
case REG_WS:
|
||||
regs->windowstart = data;
|
||||
break;
|
||||
#endif
|
||||
|
||||
default:
|
||||
/* The rest are not allowed. */
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/* continue and stop at next (return from) syscall */
|
||||
case PTRACE_SYSCALL:
|
||||
case PTRACE_CONT: /* restart after signal. */
|
||||
{
|
||||
ret = -EIO;
|
||||
if ((unsigned long) data > _NSIG)
|
||||
break;
|
||||
if (request == PTRACE_SYSCALL)
|
||||
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
else
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
child->exit_code = data;
|
||||
/* Make sure the single step bit is not set. */
|
||||
child->ptrace &= ~PT_SINGLESTEP;
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* make the child exit. Best I can do is send it a sigkill.
|
||||
* perhaps it should be put in the status that it wants to
|
||||
* exit.
|
||||
*/
|
||||
case PTRACE_KILL:
|
||||
ret = 0;
|
||||
if (child->state == EXIT_ZOMBIE) /* already dead */
|
||||
break;
|
||||
child->exit_code = SIGKILL;
|
||||
child->ptrace &= ~PT_SINGLESTEP;
|
||||
wake_up_process(child);
|
||||
break;
|
||||
|
||||
case PTRACE_SINGLESTEP:
|
||||
ret = -EIO;
|
||||
if ((unsigned long) data > _NSIG)
|
||||
break;
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
child->ptrace |= PT_SINGLESTEP;
|
||||
child->exit_code = data;
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
break;
|
||||
|
||||
case PTRACE_GETREGS:
|
||||
{
|
||||
/* 'data' points to user memory in which to write.
|
||||
* Mainly due to the non-live register values, we
|
||||
* reformat the register values into something more
|
||||
* standard. For convenience, we use the handy
|
||||
* elf_gregset_t format. */
|
||||
|
||||
xtensa_gregset_t format;
|
||||
struct pt_regs *regs = xtensa_pt_regs(child);
|
||||
|
||||
do_copy_regs (&format, regs, child);
|
||||
|
||||
/* Now, copy to user space nice and easy... */
|
||||
ret = 0;
|
||||
if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETREGS:
|
||||
{
|
||||
/* 'data' points to user memory that contains the new
|
||||
* values in the elf_gregset_t format. */
|
||||
|
||||
xtensa_gregset_t format;
|
||||
struct pt_regs *regs = xtensa_pt_regs(child);
|
||||
|
||||
if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
/* FIXME: Perhaps we want some sanity checks on
|
||||
* these user-space values? See ARM version. Are
|
||||
* debuggers a security concern? */
|
||||
|
||||
do_restore_regs (&format, regs, child);
|
||||
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_GETFPREGS:
|
||||
{
|
||||
/* 'data' points to user memory in which to write.
|
||||
* For convenience, we use the handy
|
||||
* elf_fpregset_t format. */
|
||||
|
||||
elf_fpregset_t fpregs;
|
||||
struct pt_regs *regs = xtensa_pt_regs(child);
|
||||
|
||||
do_save_fpregs (&fpregs, regs, child);
|
||||
|
||||
/* Now, copy to user space nice and easy... */
|
||||
ret = 0;
|
||||
if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
|
||||
ret = -EFAULT;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETFPREGS:
|
||||
{
|
||||
/* 'data' points to user memory that contains the new
|
||||
* values in the elf_fpregset_t format.
|
||||
*/
|
||||
elf_fpregset_t fpregs;
|
||||
struct pt_regs *regs = xtensa_pt_regs(child);
|
||||
|
||||
ret = 0;
|
||||
if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
if (do_restore_fpregs (&fpregs, regs, child))
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_GETFPREGSIZE:
|
||||
/* 'data' points to 'unsigned long' set to the size
|
||||
* of elf_fpregset_t
|
||||
*/
|
||||
ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
|
||||
break;
|
||||
|
||||
case PTRACE_DETACH: /* detach a process that was attached. */
|
||||
ret = ptrace_detach(child, data);
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
goto out;
|
||||
}
|
||||
out_tsk:
|
||||
put_task_struct(child);
|
||||
out:
|
||||
unlock_kernel();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Called from the syscall path on entry and exit when syscall tracing
 * is in effect.  Notifies the tracing parent (stopping this task) and
 * re-delivers any signal the tracer left in exit_code. */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * The 0x80 provides a way for the tracing parent to distinguish
	 * between a syscall stop and SIGTRAP delivery
	 */
	ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
|
||||
226
arch/xtensa/kernel/semaphore.c
Normal file
226
arch/xtensa/kernel/semaphore.c
Normal file
@@ -0,0 +1,226 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/semaphore.c
|
||||
*
|
||||
* Generic semaphore code. Buyer beware. Do your own specific changes
|
||||
* in <asm/semaphore-helper.h>
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
||||
* Kevin Chea
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/errno.h>
|
||||
|
||||
/*
 * These two _must_ execute atomically wrt each other.
 *
 * wake_one_more() records one pending wakeup by bumping sem->sleepers;
 * the atomic_inc here pairs with the lock-protected sleepers-- in the
 * waking_non_zero*() helpers below.
 */

static __inline__ void wake_one_more(struct semaphore * sem)
{
	/* sleepers is a plain int in struct semaphore; cast so we can
	 * increment it atomically without taking semaphore_wake_lock. */
	atomic_inc((atomic_t *)&sem->sleepers);
}
|
||||
|
||||
/*
 * Consume one pending wakeup, if any.
 * Returns 1 if this task may acquire the semaphore, 0 if it must sleep.
 * The decrement is done under semaphore_wake_lock so only one of the
 * contending waiters gates through per recorded wakeup.
 */
static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
|
||||
|
||||
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */

static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		/* A wakeup is pending: consume it and acquire. */
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* Abandon the down(): restore the count we took in
		 * down_interruptible() before bailing out. */
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
|
||||
|
||||
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count down_trylock() increment while we are
 * protected by the spinlock in order to make atomic this atomic_inc() with the
 * atomic_read() in wake_one_more(), otherwise we can race. -arca
 */

static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		/* No wakeup pending: undo down_trylock()'s decrement
		 * and report failure without sleeping. */
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
|
||||
|
||||
spinlock_t semaphore_wake_lock;
|
||||
|
||||
/*
|
||||
* Semaphores are implemented using a two-way counter:
|
||||
* The "count" variable is decremented for each process
|
||||
* that tries to sleep, while the "waking" variable is
|
||||
* incremented when the "up()" code goes to wake up waiting
|
||||
* processes.
|
||||
*
|
||||
* Notably, the inline "up()" and "down()" functions can
|
||||
* efficiently test if they need to do any extra work (up
|
||||
* needs to do something only if count was negative before
|
||||
* the increment operation.
|
||||
*
|
||||
* waking_non_zero() (from asm/semaphore.h) must execute
|
||||
* atomically.
|
||||
*
|
||||
* When __up() is called, the count was negative before
|
||||
* incrementing it, and we need to wake up somebody.
|
||||
*
|
||||
* This routine adds one to the count of processes that need to
|
||||
* wake up and exit. ALL waiting processes actually wake up but
|
||||
* only the one that gets to the "waking" field first will gate
|
||||
* through and acquire the semaphore. The others will go back
|
||||
* to sleep.
|
||||
*
|
||||
* Note that these functions are only called when there is
|
||||
* contention on the lock, and as such all this is the
|
||||
* "non-critical" part of the whole semaphore business. The
|
||||
* critical part is the inline stuff in <asm/semaphore.h>
|
||||
* where we want to avoid any extra jumps and calls.
|
||||
*/
|
||||
|
||||
/* Slow path of up(): called only when the count was negative, i.e.
 * someone is (or was) waiting.  Record one wakeup, then wake all
 * waiters; they contend in waking_non_zero() for who gates through. */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
|
||||
|
||||
/*
|
||||
* Perform the "down" function. Return zero for semaphore acquired,
|
||||
* return negative for signalled out of the function.
|
||||
*
|
||||
* If called from __down, the return is ignored and the wait loop is
|
||||
* not interruptible. This means that a task waiting on a semaphore
|
||||
* using "down()" cannot be killed until someone does an "up()" on
|
||||
* the semaphore.
|
||||
*
|
||||
* If called from __down_interruptible, the return value gets checked
|
||||
* upon return. If the return value is negative then the task continues
|
||||
* with the negative value in the return register (it can be tested by
|
||||
* the caller).
|
||||
*
|
||||
* Either form may be used in conjunction with "up()".
|
||||
*
|
||||
*/
|
||||
|
||||
/* Shared locals for the __down slow paths: the current task and a
 * wait-queue entry bound to it. */
#define DOWN_VAR				\
	struct task_struct *tsk = current;	\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, tsk);

/* Enter the sleep loop: set the task state, queue on sem->wait, then
 * open a for(;;) that the caller's body exits with 'break' once the
 * semaphore is acquired (or the wait is interrupted). */
#define DOWN_HEAD(task_state)						 \
									 \
									 \
	tsk->state = (task_state);					 \
	add_wait_queue(&sem->wait, &wait);				 \
									 \
	/*								 \
	 * Ok, we're set up.  sem->count is known to be less than zero	 \
	 * so we must wait.						 \
	 *								 \
	 * We can let go the lock for purposes of waiting.		 \
	 * We re-acquire it after awaking so as to protect		 \
	 * all semaphore operations.					 \
	 *								 \
	 * If "up()" is called before we call waking_non_zero() then	 \
	 * we will catch it right away.  If it is called later then	 \
	 * we will have to go through a wakeup cycle to catch it.	 \
	 *								 \
	 * Multiple waiters contend for the semaphore lock to see	 \
	 * who gets to gate through and who has to wait some more.	 \
	 */								 \
	for (;;) {

/* Close the sleep loop: re-set the wait state at the bottom of each
 * iteration, then on exit restore TASK_RUNNING and dequeue. */
#define DOWN_TAIL(task_state)			\
		tsk->state = (task_state);	\
	}					\
	tsk->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
|
||||
|
||||
/* Uninterruptible acquire slow path: sleep until a wakeup is pending.
 * Cannot be killed until someone does an up() on the semaphore. */
void __sched __down(struct semaphore * sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
|
||||
|
||||
/* Interruptible acquire slow path: sleep until a wakeup is pending or
 * a signal arrives.  Returns 0 on acquire, -EINTR if signalled (the
 * count has already been restored by waking_non_zero_interruptible). */
int __sched __down_interruptible(struct semaphore * sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}
|
||||
|
||||
/* Non-blocking acquire attempt: 0 on success, 1 if the semaphore could
 * not be taken (the count is restored by waking_non_zero_trylock). */
int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
|
||||
520
arch/xtensa/kernel/setup.c
Normal file
520
arch/xtensa/kernel/setup.c
Normal file
File diff suppressed because it is too large
Load Diff
713
arch/xtensa/kernel/signal.c
Normal file
713
arch/xtensa/kernel/signal.c
Normal file
File diff suppressed because it is too large
Load Diff
418
arch/xtensa/kernel/syscalls.c
Normal file
418
arch/xtensa/kernel/syscalls.c
Normal file
@@ -0,0 +1,418 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/syscall.c
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
* Copyright (C) 2000 Silicon Graphics, Inc.
|
||||
* Copyright (C) 1995 - 2000 by Ralf Baechle
|
||||
*
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Kevin Chea
|
||||
*
|
||||
*/
|
||||
|
||||
#define DEBUG 0
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/sem.h>
|
||||
#include <linux/msg.h>
|
||||
#include <linux/shm.h>
|
||||
#include <linux/errno.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/signal.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/hardirq.h>
|
||||
#include <asm/mman.h>
|
||||
#include <asm/shmparam.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/ipc.h>
|
||||
|
||||
extern void do_syscall_trace(void);
|
||||
typedef int (*syscall_t)(void *a0,...);
|
||||
extern int (*do_syscalls)(struct pt_regs *regs, syscall_t fun,
|
||||
int narg);
|
||||
extern syscall_t sys_call_table[];
|
||||
extern unsigned char sys_narg_table[];
|
||||
|
||||
/*
|
||||
* sys_pipe() is the normal C calling standard for creating a pipe. It's not
|
||||
* the way unix traditional does this, though.
|
||||
*/
|
||||
|
||||
int sys_pipe(int __user *userfds)
|
||||
{
|
||||
int fd[2];
|
||||
int error;
|
||||
|
||||
error = do_pipe(fd);
|
||||
if (!error) {
|
||||
if (copy_to_user(userfds, fd, 2 * sizeof(int)))
|
||||
error = -EFAULT;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Common code for old and new mmaps.
|
||||
*/
|
||||
|
||||
static inline long do_mmap2(unsigned long addr, unsigned long len,
|
||||
unsigned long prot, unsigned long flags,
|
||||
unsigned long fd, unsigned long pgoff)
|
||||
{
|
||||
int error = -EBADF;
|
||||
struct file * file = NULL;
|
||||
|
||||
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
|
||||
if (!(flags & MAP_ANONYMOUS)) {
|
||||
file = fget(fd);
|
||||
if (!file)
|
||||
goto out;
|
||||
}
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
||||
if (file)
|
||||
fput(file);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Legacy mmap(2) entry point taking a byte offset.
 *
 * Fix: the offset must be page-aligned before conversion to a page
 * number; previously the low bits were silently discarded by the
 * shift, mapping the wrong file region.  Reject with -EINVAL as
 * mmap(2) specifies. */
unsigned long old_mmap(unsigned long addr, size_t len, int prot,
		       int flags, int fd, off_t offset)
{
	if (offset & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
|
||||
|
||||
/* mmap2(2): like mmap but the offset is given in pages, allowing
 * large-file mappings on 32-bit.  Thin wrapper over do_mmap2(). */
long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	       unsigned long flags, unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
|
||||
|
||||
/* fork(2): child runs on a copy of the parent's address space.
 * areg[1] is the caller's stack pointer (a1), reused for the child. */
int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->areg[1], regs, 0, NULL, NULL);
}
|
||||
|
||||
/* vfork(2): share the parent's VM and block the parent until the
 * child execs or exits (CLONE_VFORK|CLONE_VM). */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK|CLONE_VM|SIGCHLD, regs->areg[1],
		       regs, 0, NULL, NULL);
}
|
||||
|
||||
int sys_clone(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long clone_flags;
|
||||
unsigned long newsp;
|
||||
int __user *parent_tidptr, *child_tidptr;
|
||||
clone_flags = regs->areg[4];
|
||||
newsp = regs->areg[3];
|
||||
parent_tidptr = (int __user *)regs->areg[5];
|
||||
child_tidptr = (int __user *)regs->areg[6];
|
||||
if (!newsp)
|
||||
newsp = regs->areg[1];
|
||||
return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
|
||||
}
|
||||
|
||||
/*
|
||||
* sys_execve() executes a new program.
|
||||
*/
|
||||
|
||||
int sys_execve(struct pt_regs *regs)
|
||||
{
|
||||
int error;
|
||||
char * filename;
|
||||
|
||||
filename = getname((char *) (long)regs->areg[5]);
|
||||
error = PTR_ERR(filename);
|
||||
if (IS_ERR(filename))
|
||||
goto out;
|
||||
error = do_execve(filename, (char **) (long)regs->areg[3],
|
||||
(char **) (long)regs->areg[4], regs);
|
||||
putname(filename);
|
||||
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
int sys_uname(struct old_utsname * name)
|
||||
{
|
||||
if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
|
||||
return 0;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Oldest uname(2) variant: each field is truncated to __OLD_UTS_LEN
 * and explicitly NUL-terminated in the user buffer.  The individual
 * __copy_to_user/__put_user results are accumulated into 'error';
 * any nonzero residue means at least one user access faulted. */
int sys_olduname(struct oldold_utsname * name)
{
	int error;

	if (!name)
		return -EFAULT;
	/* One up-front access_ok() lets us use the unchecked __copy/__put
	 * variants for every field below. */
	if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
		return -EFAULT;

	error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
	error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
	error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
	error -= __put_user(0,name->release+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
	error -= __put_user(0,name->version+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
	error -= __put_user(0,name->machine+__OLD_UTS_LEN);

	return error ? -EFAULT : 0;
}
|
||||
|
||||
|
||||
/*
 * Build the string table for the builtin "poor man's strace".
 *
 * Fix: the cleanup line undef'd the wrong name ('SYS' instead of
 * 'SYSCALL'), leaving the stringifying SYSCALL() macro defined past
 * this point where it could clash with other uses of the name.
 */

#if DEBUG
#define SYSCALL(fun, narg) #fun,
static char *sfnames[] = {
#include "syscalls.h"
};
#undef SYSCALL
#endif
|
||||
|
||||
/* Common C-level syscall dispatcher, entered from the syscall
 * exception vector with the saved user register frame.  Looks up the
 * handler and argument count, marshals arguments from the a-registers
 * per the Xtensa convention, and writes the result back to a2. */
void system_call (struct pt_regs *regs)
{
	syscall_t syscall;
	unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
	int nargs, res;
	unsigned int syscallnr;
	int ps;

#if DEBUG
	int i;
	unsigned long parms[6];
	char *sysname;
#endif

	regs->syscall = regs->areg[2];

	do_syscall_trace();

	/* Have to load after syscall_trace because strace
	 * sometimes changes regs->syscall.
	 */
	syscallnr = regs->syscall;

	parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;

	/* Restore interrupt level to syscall invoker's.
	 * If this were in assembly, we wouldn't disable
	 * interrupts in the first place:
	 */
	local_save_flags (ps);
	local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
			  (regs->ps & XCHAL_PS_INTLEVEL_MASK) );

	/* NOTE(review): '>' (not '>=') assumes sys_call_table and
	 * sys_narg_table each hold __NR_Linux_syscalls + 1 entries --
	 * confirm against the table definitions. */
	if (syscallnr > __NR_Linux_syscalls) {
		regs->areg[2] = -ENOSYS;
		return;
	}

	syscall = sys_call_table[syscallnr];
	nargs = sys_narg_table[syscallnr];

	if (syscall == NULL) {
		regs->areg[2] = -ENOSYS;
		return;
	}

	/* There shouldn't be more than six arguments in the table! */

	if (nargs > 6)
		panic("Internal error - too many syscall arguments (%d)!\n",
		      nargs);

	/* Linux takes system-call arguments in registers.  The ABI
	 * and Xtensa software conventions require the system-call
	 * number in a2.  If an argument exists in a2, we move it to
	 * the next available register.  Note that for improved
	 * efficiency, we do NOT shift all parameters down one
	 * register to maintain the original order.
	 *
	 * At best case (zero arguments), we just write the syscall
	 * number to a2.  At worst case (1 to 6 arguments), we move
	 * the argument in a2 to the next available register, then
	 * write the syscall number to a2.
	 *
	 * For clarity, the following truth table enumerates all
	 * possibilities.
	 *
	 * arguments	syscall number	arg0, arg1, arg2, arg3, arg4, arg5
	 * ---------	--------------	----------------------------------
	 *	0	      a2
	 *	1	      a2	a3
	 *	2	      a2	a4,   a3
	 *	3	      a2	a5,   a3,   a4
	 *	4	      a2	a6,   a3,   a4,   a5
	 *	5	      a2	a7,   a3,   a4,   a5,   a6
	 *	6	      a2	a8,   a3,   a4,   a5,   a6,   a7
	 */
	if (nargs) {
		parm0 = regs->areg[nargs+2];	/* displaced a2 argument */
		parm1 = regs->areg[3];
		parm2 = regs->areg[4];
		parm3 = regs->areg[5];
		parm4 = regs->areg[6];
		parm5 = regs->areg[7];
	} else /* nargs == 0 */
		/* Zero-arg entries (e.g. sys_fork) receive the register
		 * frame itself as their single argument. */
		parm0 = (unsigned long) regs;

#if DEBUG
	parms[0] = parm0;
	parms[1] = parm1;
	parms[2] = parm2;
	parms[3] = parm3;
	parms[4] = parm4;
	parms[5] = parm5;

	sysname = sfnames[syscallnr];
	if (strncmp(sysname, "sys_", 4) == 0)
		sysname = sysname + 4;

	printk("\017SYSCALL:I:%x:%d:%s  %s(", regs->pc, current->pid,
	       current->comm, sysname);
	for (i = 0; i < nargs; i++)
		printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
	printk(")\n");
#endif

	res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);

#if DEBUG
	printk("\017SYSCALL:O:%d:%s  %s(",current->pid, current->comm, sysname);
	for (i = 0; i < nargs; i++)
		printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
	if (res < 4096)
		printk(") = %d\n", res);
	else
		printk(") = %#x\n", res);
#endif /* DEBUG */

	regs->areg[2] = res;
	do_syscall_trace();
}
|
||||
|
||||
/*
|
||||
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
|
||||
*
|
||||
* This is really horribly ugly.
|
||||
*/
|
||||
|
||||
int sys_ipc (uint call, int first, int second,
|
||||
int third, void __user *ptr, long fifth)
|
||||
{
|
||||
int version, ret;
|
||||
|
||||
version = call >> 16; /* hack for backward compatibility */
|
||||
call &= 0xffff;
|
||||
ret = -ENOSYS;
|
||||
|
||||
switch (call) {
|
||||
case SEMOP:
|
||||
ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
|
||||
second, NULL);
|
||||
break;
|
||||
|
||||
case SEMTIMEDOP:
|
||||
ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
|
||||
second, (const struct timespec *) fifth);
|
||||
break;
|
||||
|
||||
case SEMGET:
|
||||
ret = sys_semget (first, second, third);
|
||||
break;
|
||||
|
||||
case SEMCTL: {
|
||||
union semun fourth;
|
||||
|
||||
if (ptr && !get_user(fourth.__pad, (void *__user *) ptr))
|
||||
ret = sys_semctl (first, second, third, fourth);
|
||||
break;
|
||||
}
|
||||
|
||||
case MSGSND:
|
||||
ret = sys_msgsnd (first, (struct msgbuf __user*) ptr,
|
||||
second, third);
|
||||
break;
|
||||
|
||||
case MSGRCV:
|
||||
switch (version) {
|
||||
case 0: {
|
||||
struct ipc_kludge tmp;
|
||||
|
||||
if (ptr && !copy_from_user(&tmp,
|
||||
(struct ipc_kludge *) ptr,
|
||||
sizeof (tmp)))
|
||||
ret = sys_msgrcv (first, tmp.msgp, second,
|
||||
tmp.msgtyp, third);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
|
||||
second, 0, third);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case MSGGET:
|
||||
ret = sys_msgget ((key_t) first, second);
|
||||
break;
|
||||
|
||||
case MSGCTL:
|
||||
ret = sys_msgctl (first, second, (struct msqid_ds __user*) ptr);
|
||||
break;
|
||||
|
||||
case SHMAT: {
|
||||
ulong raddr;
|
||||
ret = do_shmat (first, (char __user *) ptr, second, &raddr);
|
||||
|
||||
if (!ret)
|
||||
ret = put_user (raddr, (ulong __user *) third);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case SHMDT:
|
||||
ret = sys_shmdt ((char __user *)ptr);
|
||||
break;
|
||||
|
||||
case SHMGET:
|
||||
ret = sys_shmget (first, second, third);
|
||||
break;
|
||||
|
||||
case SHMCTL:
|
||||
ret = sys_shmctl (first, second, (struct shmid_ds __user*) ptr);
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
248
arch/xtensa/kernel/syscalls.h
Normal file
248
arch/xtensa/kernel/syscalls.h
Normal file
@@ -0,0 +1,248 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/syscalls.h
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Changes by Joe Taylor <joe@tensilica.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is being included twice - once to build a list of all
|
||||
* syscalls and once to build a table of how many arguments each syscall
|
||||
* accepts. Syscalls that receive a pointer to the saved registers are
|
||||
* marked as having zero arguments.
|
||||
*
|
||||
* The binary compatibility calls are in a separate list.
|
||||
*
|
||||
* Entry '0' used to be system_call. It's removed to disable indirect
|
||||
* system calls for now so user tasks can't recurse. See mips'
|
||||
* sys_syscall for a comparable example.
|
||||
*/
|
||||
|
||||
SYSCALL(0, 0) /* 00 */
|
||||
|
||||
SYSCALL(sys_exit, 1)
|
||||
SYSCALL(sys_fork, 0)
|
||||
SYSCALL(sys_read, 3)
|
||||
SYSCALL(sys_write, 3)
|
||||
SYSCALL(sys_open, 3) /* 05 */
|
||||
SYSCALL(sys_close, 1)
|
||||
SYSCALL(sys_waitpid, 3)
|
||||
SYSCALL(sys_creat, 2)
|
||||
SYSCALL(sys_link, 2)
|
||||
SYSCALL(sys_unlink, 1) /* 10 */
|
||||
SYSCALL(sys_execve, 0)
|
||||
SYSCALL(sys_chdir, 1)
|
||||
SYSCALL(sys_time, 1)
|
||||
SYSCALL(sys_mknod, 3)
|
||||
SYSCALL(sys_chmod, 2) /* 15 */
|
||||
SYSCALL(sys_lchown, 3)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_stat, 2)
|
||||
SYSCALL(sys_lseek, 3)
|
||||
SYSCALL(sys_getpid, 0) /* 20 */
|
||||
SYSCALL(sys_mount, 5)
|
||||
SYSCALL(sys_oldumount, 1)
|
||||
SYSCALL(sys_setuid, 1)
|
||||
SYSCALL(sys_getuid, 0)
|
||||
SYSCALL(sys_stime, 1) /* 25 */
|
||||
SYSCALL(sys_ptrace, 4)
|
||||
SYSCALL(sys_alarm, 1)
|
||||
SYSCALL(sys_fstat, 2)
|
||||
SYSCALL(sys_pause, 0)
|
||||
SYSCALL(sys_utime, 2) /* 30 */
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_access, 2)
|
||||
SYSCALL(sys_nice, 1)
|
||||
SYSCALL(sys_ni_syscall, 0) /* 35 */
|
||||
SYSCALL(sys_sync, 0)
|
||||
SYSCALL(sys_kill, 2)
|
||||
SYSCALL(sys_rename, 2)
|
||||
SYSCALL(sys_mkdir, 2)
|
||||
SYSCALL(sys_rmdir, 1) /* 40 */
|
||||
SYSCALL(sys_dup, 1)
|
||||
SYSCALL(sys_pipe, 1)
|
||||
SYSCALL(sys_times, 1)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_brk, 1) /* 45 */
|
||||
SYSCALL(sys_setgid, 1)
|
||||
SYSCALL(sys_getgid, 0)
|
||||
SYSCALL(sys_ni_syscall, 0) /* was signal(2) */
|
||||
SYSCALL(sys_geteuid, 0)
|
||||
SYSCALL(sys_getegid, 0) /* 50 */
|
||||
SYSCALL(sys_acct, 1)
|
||||
SYSCALL(sys_umount, 2)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_ioctl, 3)
|
||||
SYSCALL(sys_fcntl, 3) /* 55 */
|
||||
SYSCALL(sys_ni_syscall, 2)
|
||||
SYSCALL(sys_setpgid, 2)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_olduname, 1)
|
||||
SYSCALL(sys_umask, 1) /* 60 */
|
||||
SYSCALL(sys_chroot, 1)
|
||||
SYSCALL(sys_ustat, 2)
|
||||
SYSCALL(sys_dup2, 2)
|
||||
SYSCALL(sys_getppid, 0)
|
||||
SYSCALL(sys_getpgrp, 0) /* 65 */
|
||||
SYSCALL(sys_setsid, 0)
|
||||
SYSCALL(sys_sigaction, 3)
|
||||
SYSCALL(sys_sgetmask, 0)
|
||||
SYSCALL(sys_ssetmask, 1)
|
||||
SYSCALL(sys_setreuid, 2) /* 70 */
|
||||
SYSCALL(sys_setregid, 2)
|
||||
SYSCALL(sys_sigsuspend, 0)
|
||||
SYSCALL(sys_sigpending, 1)
|
||||
SYSCALL(sys_sethostname, 2)
|
||||
SYSCALL(sys_setrlimit, 2) /* 75 */
|
||||
SYSCALL(sys_getrlimit, 2)
|
||||
SYSCALL(sys_getrusage, 2)
|
||||
SYSCALL(sys_gettimeofday, 2)
|
||||
SYSCALL(sys_settimeofday, 2)
|
||||
SYSCALL(sys_getgroups, 2) /* 80 */
|
||||
SYSCALL(sys_setgroups, 2)
|
||||
SYSCALL(sys_ni_syscall, 0) /* old_select */
|
||||
SYSCALL(sys_symlink, 2)
|
||||
SYSCALL(sys_lstat, 2)
|
||||
SYSCALL(sys_readlink, 3) /* 85 */
|
||||
SYSCALL(sys_uselib, 1)
|
||||
SYSCALL(sys_swapon, 2)
|
||||
SYSCALL(sys_reboot, 3)
|
||||
SYSCALL(old_readdir, 3)
|
||||
SYSCALL(old_mmap, 6) /* 90 */
|
||||
SYSCALL(sys_munmap, 2)
|
||||
SYSCALL(sys_truncate, 2)
|
||||
SYSCALL(sys_ftruncate, 2)
|
||||
SYSCALL(sys_fchmod, 2)
|
||||
SYSCALL(sys_fchown, 3) /* 95 */
|
||||
SYSCALL(sys_getpriority, 2)
|
||||
SYSCALL(sys_setpriority, 3)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_statfs, 2)
|
||||
SYSCALL(sys_fstatfs, 2) /* 100 */
|
||||
SYSCALL(sys_ni_syscall, 3)
|
||||
SYSCALL(sys_socketcall, 2)
|
||||
SYSCALL(sys_syslog, 3)
|
||||
SYSCALL(sys_setitimer, 3)
|
||||
SYSCALL(sys_getitimer, 2) /* 105 */
|
||||
SYSCALL(sys_newstat, 2)
|
||||
SYSCALL(sys_newlstat, 2)
|
||||
SYSCALL(sys_newfstat, 2)
|
||||
SYSCALL(sys_uname, 1)
|
||||
SYSCALL(sys_ni_syscall, 0) /* 110 */
|
||||
SYSCALL(sys_vhangup, 0)
|
||||
SYSCALL(sys_ni_syscall, 0) /* was sys_idle() */
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_wait4, 4)
|
||||
SYSCALL(sys_swapoff, 1) /* 115 */
|
||||
SYSCALL(sys_sysinfo, 1)
|
||||
SYSCALL(sys_ipc, 5) /* 6 really, but glibc uses only 5) */
|
||||
SYSCALL(sys_fsync, 1)
|
||||
SYSCALL(sys_sigreturn, 0)
|
||||
SYSCALL(sys_clone, 0) /* 120 */
|
||||
SYSCALL(sys_setdomainname, 2)
|
||||
SYSCALL(sys_newuname, 1)
|
||||
SYSCALL(sys_ni_syscall, 0) /* sys_modify_ldt */
|
||||
SYSCALL(sys_adjtimex, 1)
|
||||
SYSCALL(sys_mprotect, 3) /* 125 */
|
||||
SYSCALL(sys_sigprocmask, 3)
|
||||
SYSCALL(sys_ni_syscall, 2) /* old sys_create_module */
|
||||
SYSCALL(sys_init_module, 2)
|
||||
SYSCALL(sys_delete_module, 1)
|
||||
SYSCALL(sys_ni_syscall, 1) /* old sys_get_kernel_sysm */ /* 130 */
|
||||
SYSCALL(sys_quotactl, 0)
|
||||
SYSCALL(sys_getpgid, 1)
|
||||
SYSCALL(sys_fchdir, 1)
|
||||
SYSCALL(sys_bdflush, 2)
|
||||
SYSCALL(sys_sysfs, 3) /* 135 */
|
||||
SYSCALL(sys_personality, 1)
|
||||
SYSCALL(sys_ni_syscall, 0) /* for afs_syscall */
|
||||
SYSCALL(sys_setfsuid, 1)
|
||||
SYSCALL(sys_setfsgid, 1)
|
||||
SYSCALL(sys_llseek, 5) /* 140 */
|
||||
SYSCALL(sys_getdents, 3)
|
||||
SYSCALL(sys_select, 5)
|
||||
SYSCALL(sys_flock, 2)
|
||||
SYSCALL(sys_msync, 3)
|
||||
SYSCALL(sys_readv, 3) /* 145 */
|
||||
SYSCALL(sys_writev, 3)
|
||||
SYSCALL(sys_ni_syscall, 3)
|
||||
SYSCALL(sys_ni_syscall, 3)
|
||||
SYSCALL(sys_ni_syscall, 4) /* handled in fast syscall handler. */
|
||||
SYSCALL(sys_ni_syscall, 0) /* 150 */
|
||||
SYSCALL(sys_getsid, 1)
|
||||
SYSCALL(sys_fdatasync, 1)
|
||||
SYSCALL(sys_sysctl, 1)
|
||||
SYSCALL(sys_mlock, 2)
|
||||
SYSCALL(sys_munlock, 2) /* 155 */
|
||||
SYSCALL(sys_mlockall, 1)
|
||||
SYSCALL(sys_munlockall, 0)
|
||||
SYSCALL(sys_sched_setparam,2)
|
||||
SYSCALL(sys_sched_getparam,2)
|
||||
SYSCALL(sys_sched_setscheduler,3) /* 160 */
|
||||
SYSCALL(sys_sched_getscheduler,1)
|
||||
SYSCALL(sys_sched_yield,0)
|
||||
SYSCALL(sys_sched_get_priority_max,1)
|
||||
SYSCALL(sys_sched_get_priority_min,1)
|
||||
SYSCALL(sys_sched_rr_get_interval,2) /* 165 */
|
||||
SYSCALL(sys_nanosleep,2)
|
||||
SYSCALL(sys_mremap,4)
|
||||
SYSCALL(sys_accept, 3)
|
||||
SYSCALL(sys_bind, 3)
|
||||
SYSCALL(sys_connect, 3) /* 170 */
|
||||
SYSCALL(sys_getpeername, 3)
|
||||
SYSCALL(sys_getsockname, 3)
|
||||
SYSCALL(sys_getsockopt, 5)
|
||||
SYSCALL(sys_listen, 2)
|
||||
SYSCALL(sys_recv, 4) /* 175 */
|
||||
SYSCALL(sys_recvfrom, 6)
|
||||
SYSCALL(sys_recvmsg, 3)
|
||||
SYSCALL(sys_send, 4)
|
||||
SYSCALL(sys_sendmsg, 3)
|
||||
SYSCALL(sys_sendto, 6) /* 180 */
|
||||
SYSCALL(sys_setsockopt, 5)
|
||||
SYSCALL(sys_shutdown, 2)
|
||||
SYSCALL(sys_socket, 3)
|
||||
SYSCALL(sys_socketpair, 4)
|
||||
SYSCALL(sys_setresuid, 3) /* 185 */
|
||||
SYSCALL(sys_getresuid, 3)
|
||||
SYSCALL(sys_ni_syscall, 5) /* old sys_query_module */
|
||||
SYSCALL(sys_poll, 3)
|
||||
SYSCALL(sys_nfsservctl, 3)
|
||||
SYSCALL(sys_setresgid, 3) /* 190 */
|
||||
SYSCALL(sys_getresgid, 3)
|
||||
SYSCALL(sys_prctl, 5)
|
||||
SYSCALL(sys_rt_sigreturn, 0)
|
||||
SYSCALL(sys_rt_sigaction, 4)
|
||||
SYSCALL(sys_rt_sigprocmask, 4) /* 195 */
|
||||
SYSCALL(sys_rt_sigpending, 2)
|
||||
SYSCALL(sys_rt_sigtimedwait, 4)
|
||||
SYSCALL(sys_rt_sigqueueinfo, 3)
|
||||
SYSCALL(sys_rt_sigsuspend, 0)
|
||||
SYSCALL(sys_pread64, 5) /* 200 */
|
||||
SYSCALL(sys_pwrite64, 5)
|
||||
SYSCALL(sys_chown, 3)
|
||||
SYSCALL(sys_getcwd, 2)
|
||||
SYSCALL(sys_capget, 2)
|
||||
SYSCALL(sys_capset, 2) /* 205 */
|
||||
SYSCALL(sys_sigaltstack, 0)
|
||||
SYSCALL(sys_sendfile, 4)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_ni_syscall, 0)
|
||||
SYSCALL(sys_mmap2, 6) /* 210 */
|
||||
SYSCALL(sys_truncate64, 2)
|
||||
SYSCALL(sys_ftruncate64, 2)
|
||||
SYSCALL(sys_stat64, 2)
|
||||
SYSCALL(sys_lstat64, 2)
|
||||
SYSCALL(sys_fstat64, 2) /* 215 */
|
||||
SYSCALL(sys_pivot_root, 2)
|
||||
SYSCALL(sys_mincore, 3)
|
||||
SYSCALL(sys_madvise, 3)
|
||||
SYSCALL(sys_getdents64, 3)
|
||||
SYSCALL(sys_vfork, 0) /* 220 */
|
||||
227
arch/xtensa/kernel/time.c
Normal file
227
arch/xtensa/kernel/time.c
Normal file
@@ -0,0 +1,227 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/time.c
|
||||
*
|
||||
* Timer and clock support.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2005 Tensilica Inc.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include <asm/timex.h>
|
||||
#include <asm/platform.h>
|
||||
|
||||
|
||||
extern volatile unsigned long wall_jiffies;
|
||||
|
||||
u64 jiffies_64 = INITIAL_JIFFIES;
|
||||
EXPORT_SYMBOL(jiffies_64);
|
||||
|
||||
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
|
||||
EXPORT_SYMBOL(rtc_lock);
|
||||
|
||||
|
||||
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
|
||||
unsigned long ccount_per_jiffy; /* per 1/HZ */
|
||||
unsigned long ccount_nsec; /* nsec per ccount increment */
|
||||
#endif
|
||||
|
||||
unsigned int last_ccount_stamp;
|
||||
static long last_rtc_update = 0;
|
||||
|
||||
/*
|
||||
 * Scheduler clock - returns current time in nanosecond units.
|
||||
*/
|
||||
|
||||
/* Coarse scheduler clock: nanoseconds derived from the jiffies counter
 * only, so resolution is one tick (1/HZ s), not the cycle counter.
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
|
||||
|
||||
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
|
||||
static struct irqaction timer_irqaction = {
|
||||
.handler = timer_interrupt,
|
||||
.flags = SA_INTERRUPT,
|
||||
.name = "timer",
|
||||
};
|
||||
|
||||
/* Boot-time clock initialization: optionally calibrate the CPU cycle
 * counter, seed xtime from the platform RTC (if any), and install the
 * periodic timer interrupt.
 */
void __init time_init(void)
{
	time_t sec_o, sec_n = 0;

	/* The platform must provide a function to calibrate the processor
	 * speed for the CALIBRATE.
	 */

	/* Use #ifdef, not #if: this matches the guard used for the
	 * ccount_per_jiffy/ccount_nsec declarations earlier in this file
	 * and does not rely on an undefined macro evaluating to 0.
	 */
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
			(int)(ccount_per_jiffy/(10000/HZ))%100);
#endif

	/* Set time from RTC (if provided) */

	/* Spin until the RTC second changes so xtime starts on a second
	 * boundary (only if an initial read succeeded).
	 */
	if (platform_get_rtc_time(&sec_o) == 0)
		while (platform_get_rtc_time(&sec_n))
			if (sec_o != sec_n)
				break;

	xtime.tv_nsec = 0;
	last_rtc_update = xtime.tv_sec = sec_n;
	last_ccount_stamp = get_ccount();

	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);

	/* Initialize the linux timer interrupt. */

	setup_irq(LINUX_TIMER_INT, &timer_irqaction);
	set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
|
||||
|
||||
|
||||
/* Set the wall-clock time, compensating for time that has elapsed since
 * the last tick so gettimeofday() stays continuous.
 * Returns 0 on success, -EINVAL for an out-of-range nanosecond value.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long ccount;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	ccount = get_ccount();
	/* Subtract cycles since the last tick, plus any ticks not yet
	 * folded into wall time, so xtime reflects the last-update instant.
	 */
	nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
	nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;

	/* Shift wall_to_monotonic by the same delta so monotonic time is
	 * unaffected by this wall-clock step.
	 */
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;	/* clock is no longer NTP-synchronized */
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	return 0;
}
|
||||
|
||||
EXPORT_SYMBOL(do_settimeofday);
|
||||
|
||||
|
||||
/* Read the wall-clock time with sub-tick resolution: sample xtime under
 * the seqlock, then extrapolate using the cycle counter and any ticks
 * not yet applied to wall time.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long sec, usec, delta, lost, seq;

	/* Retry until a consistent snapshot is read (no concurrent writer). */
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		delta = get_ccount() - last_ccount_stamp;
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / NSEC_PER_USEC);

		lost = jiffies - wall_jiffies;

	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Add pending ticks plus cycles since the last tick, in usec. */
	usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
	/* Normalize: carry whole seconds out of the microsecond field. */
	for (; usec >= 1000000; sec++, usec -= 1000000)
		;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
|
||||
|
||||
EXPORT_SYMBOL(do_gettimeofday);
|
||||
|
||||
/*
|
||||
* The timer interrupt is called HZ times per second.
|
||||
*/
|
||||
|
||||
/* Periodic timer interrupt handler.  Accounts for every jiffy whose
 * CCOMPARE deadline has passed (the loop catches up if ticks were
 * delayed), opportunistically writes the RTC, then arms the next tick.
 */
irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{

	unsigned long next;

	next = get_linux_timer();

again:
	/* Signed comparison handles ccount wrap-around; loop once per
	 * elapsed jiffy so no tick is lost.
	 */
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING, regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif

		write_seqlock(&xtime_lock);

		last_ccount_stamp = next;
		next += CCOUNT_PER_JIFFY;
		do_timer (regs); /* Linux handler in kernel/timer.c */

		/* If NTP-synchronized, write the RTC about every 11 minutes
		 * (660 s), near the middle of a second.
		 */
		if ((time_status & STA_UNSYNC) == 0 &&
		    xtime.tv_sec - last_rtc_update >= 659 &&
		    abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
		    jiffies - wall_jiffies == 1) {

			if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Do it again in 60 s */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}

	/* NOTE: writing CCOMPAREn clears the interrupt. */

	set_linux_timer (next);

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	/* Allow platform to do something useful (Wdog). */

	platform_heartbeat();

	return IRQ_HANDLED;
}
|
||||
|
||||
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
|
||||
/* Skip the generic BogoMIPS measurement loop: the delay calibration is
 * derived directly from the (known) cycle-counter rate per jiffy.
 */
void __devinit calibrate_delay(void)
{
	loops_per_jiffy = CCOUNT_PER_JIFFY;
	printk("Calibrating delay loop (skipped)... "
	       "%lu.%02lu BogoMIPS preset\n",
	       loops_per_jiffy/(1000000/HZ),
	       (loops_per_jiffy/(10000/HZ)) % 100);
}
|
||||
#endif
|
||||
|
||||
498
arch/xtensa/kernel/traps.c
Normal file
498
arch/xtensa/kernel/traps.c
Normal file
@@ -0,0 +1,498 @@
|
||||
/*
|
||||
* arch/xtensa/kernel/traps.c
|
||||
*
|
||||
* Exception handling.
|
||||
*
|
||||
* Derived from code with the following copyrights:
|
||||
* Copyright (C) 1994 - 1999 by Ralf Baechle
|
||||
* Modified for R3000 by Paul M. Antoine, 1995, 1996
|
||||
* Complete output from die() by Ulf Carlsson, 1998
|
||||
* Copyright (C) 1999 Silicon Graphics, Inc.
|
||||
*
|
||||
* Essentially rewritten for the Xtensa architecture port.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
|
||||
* Kevin Chea
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/kallsyms.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/timex.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
#ifdef CONFIG_KGDB
|
||||
extern int gdb_enter;
|
||||
extern int return_from_debug_flag;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Machine specific interrupt handlers
|
||||
*/
|
||||
|
||||
extern void kernel_exception(void);
|
||||
extern void user_exception(void);
|
||||
|
||||
extern void fast_syscall_kernel(void);
|
||||
extern void fast_syscall_user(void);
|
||||
extern void fast_alloca(void);
|
||||
extern void fast_unaligned(void);
|
||||
extern void fast_second_level_miss(void);
|
||||
extern void fast_store_prohibited(void);
|
||||
extern void fast_coprocessor(void);
|
||||
|
||||
extern void do_illegal_instruction (struct pt_regs*);
|
||||
extern void do_interrupt (struct pt_regs*);
|
||||
extern void do_unaligned_user (struct pt_regs*);
|
||||
extern void do_multihit (struct pt_regs*, unsigned long);
|
||||
extern void do_page_fault (struct pt_regs*, unsigned long);
|
||||
extern void do_debug (struct pt_regs*);
|
||||
extern void system_call (struct pt_regs*);
|
||||
|
||||
/*
|
||||
* The vector table must be preceded by a save area (which
|
||||
* implies it must be in RAM, unless one places RAM immediately
|
||||
* before a ROM and puts the vector at the start of the ROM (!))
|
||||
*/
|
||||
|
||||
#define KRNL 0x01
|
||||
#define USER 0x02
|
||||
|
||||
#define COPROCESSOR(x) \
|
||||
{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
|
||||
|
||||
typedef struct {
|
||||
int cause;
|
||||
int fast;
|
||||
void* handler;
|
||||
} dispatch_init_table_t;
|
||||
|
||||
/* Compressed exception-dispatch configuration, consumed once by
 * trap_init().  Each entry maps an EXCCAUSE value to a handler; the
 * 'fast' field selects which dispatch table(s) get the handler:
 * 0 = default C handler, USER/KRNL = fast first-level handlers.
 * The table is __init data and is terminated by a cause of -1.
 */
dispatch_init_table_t __init dispatch_init_table[] = {

{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
{ XCHAL_EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER
{ XCHAL_EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#else
{ XCHAL_EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
#endif
{ XCHAL_EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
{ XCHAL_EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
{ XCHAL_EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
/* One fast_coprocessor entry per configured coprocessor. */
#if (XCHAL_CP_MASK & 1)
COPROCESSOR(0),
#endif
#if (XCHAL_CP_MASK & 2)
COPROCESSOR(1),
#endif
#if (XCHAL_CP_MASK & 4)
COPROCESSOR(2),
#endif
#if (XCHAL_CP_MASK & 8)
COPROCESSOR(3),
#endif
#if (XCHAL_CP_MASK & 16)
COPROCESSOR(4),
#endif
#if (XCHAL_CP_MASK & 32)
COPROCESSOR(5),
#endif
#if (XCHAL_CP_MASK & 64)
COPROCESSOR(6),
#endif
#if (XCHAL_CP_MASK & 128)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,	   do_debug },
{ -1, -1, 0 }	/* sentinel: terminates the trap_init() scan */

};
|
||||
|
||||
/* The exception table <exc_table> serves two functions:
|
||||
* 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
|
||||
* 2. it is a temporary memory buffer for the exception handlers.
|
||||
*/
|
||||
|
||||
unsigned long exc_table[EXC_TABLE_SIZE/4];
|
||||
|
||||
void die(const char*, struct pt_regs*, long);
|
||||
|
||||
/* Die (panic path) only if the trapping context was kernel mode;
 * user-mode faults are handled by the caller via signals.
 */
static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
|
||||
|
||||
/*
|
||||
* Unhandled Exceptions. Kill user task or panic if in kernel space.
|
||||
*/
|
||||
|
||||
/* Default C handler for any EXCCAUSE without a specific entry in the
 * dispatch table: fatal in kernel mode, SIGILL to the task otherwise.
 */
void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
	    regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	printk("Caught unhandled exception in '%s' "
	       "(pid = %d, pc = %#010lx) - should not happen\n"
	       "\tEXCCAUSE is %ld\n",
	       current->comm, current->pid, regs->pc, exccause);
	force_sig(SIGILL, current);
}
|
||||
|
||||
/*
|
||||
 * Multi-hit exception. This is fatal!
|
||||
*/
|
||||
|
||||
/* TLB multi-hit exception: always fatal, even from user mode,
 * since it indicates corrupt TLB state.
 */
void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
	die("Caught multihit exception", regs, SIGKILL);
}
|
||||
|
||||
/*
|
||||
* Level-1 interrupt.
|
||||
* We currently have no priority encoding.
|
||||
*/
|
||||
|
||||
unsigned long ignored_level1_interrupts;
|
||||
extern void do_IRQ(int, struct pt_regs *);
|
||||
|
||||
/* Level-1 interrupt dispatcher: scans INTREAD against INTENABLE and
 * calls do_IRQ() for every pending, enabled interrupt.  No priority
 * ordering beyond ascending interrupt number.
 */
void do_interrupt (struct pt_regs *regs)
{
	unsigned long intread = get_sr (INTREAD);
	unsigned long intenable = get_sr (INTENABLE);
	int i, mask;

	/* Handle all interrupts (no priorities).
	 * (Clear the interrupt before processing, in case it's
	 *  edge-triggered or software-generated)
	 */

	for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
		if (mask & (intread & intenable)) {
			set_sr (mask, INTCLEAR);
			do_IRQ (i,regs);
		}
	}
}
|
||||
|
||||
/*
|
||||
* Illegal instruction. Fatal if in kernel space.
|
||||
*/
|
||||
|
||||
/* Illegal-instruction exception: fatal in kernel mode, SIGILL to the
 * offending task otherwise.
 */
void
do_illegal_instruction(struct pt_regs *regs)
{
	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
	    current->comm, current->pid, regs->pc);
	force_sig(SIGILL, current);
}
|
||||
|
||||
|
||||
/*
|
||||
* Handle unaligned memory accesses from user space. Kill task.
|
||||
*
|
||||
* If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
|
||||
 * accesses caused from user space.
|
||||
*/
|
||||
|
||||
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
|
||||
#ifndef CONFIG_UNALIGNED_USER
|
||||
void
|
||||
do_unaligned_user (struct pt_regs *regs)
|
||||
{
|
||||
siginfo_t info;
|
||||
|
||||
__die_if_kernel("Unhandled unaligned exception in kernel",
|
||||
regs, SIGKILL);
|
||||
|
||||
current->thread.bad_vaddr = regs->excvaddr;
|
||||
current->thread.error_code = -3;
|
||||
printk("Unaligned memory access to %08lx in '%s' "
|
||||
"(pid = %d, pc = %#010lx)\n",
|
||||
regs->excvaddr, current->comm, current->pid, regs->pc);
|
||||
info.si_signo = SIGBUS;
|
||||
info.si_errno = 0;
|
||||
info.si_code = BUS_ADRALN;
|
||||
info.si_addr = (void *) regs->excvaddr;
|
||||
force_sig_info(SIGSEGV, &info, current);
|
||||
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Debug exception: hand off to KGDB if configured and enabled,
 * otherwise fatal in kernel mode / SIGTRAP to the user task.
 */
void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_KGDB
	/* If remote debugging is configured AND enabled, we give control to
	 * kgdb.  Otherwise, we fall through, perhaps giving control to the
	 * native debugger.
	 */

	if (gdb_enter) {
		extern void gdb_handle_exception(struct pt_regs *);
		gdb_handle_exception(regs);
		return_from_debug_flag = 1;
		return;
	}
#endif

	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP, current);
}
|
||||
|
||||
|
||||
/*
|
||||
* Initialize dispatch tables.
|
||||
*
|
||||
* The exception vectors are stored compressed the __init section in the
|
||||
* dispatch_init_table. This function initializes the following three tables
|
||||
* from that compressed table:
|
||||
* - fast user first dispatch table for user exceptions
|
||||
* - fast kernel first dispatch table for kernel exceptions
|
||||
* - default C-handler C-handler called by the default fast handler.
|
||||
*
|
||||
* See vectors.S for more details.
|
||||
*/
|
||||
|
||||
#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
|
||||
|
||||
/* Expand dispatch_init_table into the three runtime dispatch tables
 * inside exc_table (fast-user, fast-kernel, default-C), then point
 * EXCSAVE_1 at exc_table so the exception vectors can find it.
 */
void trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for(i = 0; i < 64; i++) {
		set_handler(EXC_TABLE_FAST_USER/4   + i, user_exception);
		set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
		set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
	}

	/* Setup specific handlers. */

	for(i = 0; dispatch_init_table[i].cause >= 0; i++) {

		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		/* fast == 0 selects the default C-handler table; otherwise
		 * the USER/KRNL bits select the fast dispatch tables.
		 */
		if (fast == 0)
			set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
		if (fast && fast & USER)
			set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
		if (fast && fast & KRNL)
			set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */

	i = (unsigned long)exc_table;
	__asm__ __volatile__("wsr  %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
}
|
||||
|
||||
/*
|
||||
* This function dumps the current valid window frame and other base registers.
|
||||
*/
|
||||
|
||||
/*
 * This function dumps the current valid window frame and other base
 * registers.  Address registers are printed until the first 4-register
 * group whose wmask bit is set; special registers follow.
 */
void show_regs(struct pt_regs * regs)
{
	int i, wmask;

	/* Bit 0 of wmask covers a0-a3, which are always printed. */
	wmask = regs->wmask & ~1;

	for (i = 0; i < 32; i++) {
		if (wmask & (1 << (i / 4)))
			break;
		if ((i % 8) == 0)
			printk ("\n" KERN_INFO "a%02d: ", i);
		printk("%08lx ", regs->areg[i]);
	}
	printk("\n");

	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
	/* Window/syscall state is only meaningful for user-mode frames. */
	if (user_mode(regs))
		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
		       regs->windowbase, regs->windowstart, regs->wmask,
		       regs->syscall);
}
|
||||
|
||||
/* Walk the call chain starting at stack pointer sp (or the current a1
 * if sp is NULL), printing every return address that lands in kernel
 * text.  Relies on the windowed-ABI frame layout: the caller's a0
 * (return address) and a1 (stack pointer) are spilled at sp-4/sp-3.
 */
void show_trace(struct task_struct *task, unsigned long *sp)
{
	unsigned long a0, a1, pc;
	unsigned long sp_start, sp_end;

	a1 = (unsigned long)sp;

	if (a1 == 0)
		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));


	/* Bound the walk to the task's own stack area. */
	sp_start = a1 & ~(THREAD_SIZE-1);
	sp_end = sp_start + THREAD_SIZE;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	/* Force live register windows onto the stack so the frames below
	 * are actually in memory.
	 */
	spill_registers();

	while (a1 > sp_start && a1 < sp_end) {
		sp = (unsigned long*)a1;

		a0 = *(sp - 4);
		a1 = *(sp - 3);

		/* Stacks grow down; a non-increasing sp means a bad frame. */
		if (a1 <= (unsigned long) sp)
			break;

		pc = MAKE_PC_FROM_RA(a0, a1);

		if (kernel_text_address(pc)) {
			printk(" [<%08lx>] ", pc);
			print_symbol("%s\n", pc);
		}
	}
	printk("\n");
}
|
||||
|
||||
/*
|
||||
* This routine abuses get_user()/put_user() to reference pointers
|
||||
* with at least a bit of error checking ...
|
||||
*/
|
||||
|
||||
static int kstack_depth_to_print = 24;
|
||||
|
||||
/* Hex-dump up to kstack_depth_to_print words of the stack starting at
 * sp (or the current a1 if sp is NULL), then print the call trace.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	int i = 0;
	unsigned long *stack;

	if (sp == 0)
		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));

	/* Remember the start for show_trace(); sp is advanced below. */
	stack = sp;

	printk("\nStack: ");

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(sp))
			break;
		if (i && ((i % 8) == 0))
			printk("\n       ");
		printk("%08lx ", *sp++);
	}
	printk("\n");
	show_trace(task, stack);
}
|
||||
|
||||
/* Generic kernel entry point: dump the current task's stack and trace. */
void dump_stack(void)
{
	show_stack(current, NULL);
}
|
||||
|
||||
EXPORT_SYMBOL(dump_stack);
|
||||
|
||||
|
||||
/* Dump nine 32-bit words of code around pc (three before, six after),
 * marking the faulting word with <...>.  Uses __get_user() so a bad pc
 * is caught instead of faulting again.
 */
void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for(i = -3 ; i < 6 ; i++) {
		unsigned long insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in pc)\n");
			break;
		}
		printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
	}
}
|
||||
|
||||
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
|
||||
|
||||
/* Fatal kernel error: print the oops banner, registers, and kernel
 * stack (serialized by die_lock), then terminate — panic if in
 * interrupt context or panic_on_oops is set, do_exit(err) otherwise.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;	/* distinguishes successive oopses: [#N] */
	int nl = 0;

	console_verbose();
	spin_lock_irq(&die_lock);

	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long*)regs->areg[1]);

	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		/* Give the console a moment to flush before panicking. */
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(5 * HZ);
		panic("Fatal exception");
	}
	do_exit(err);
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user