xtensa: reorganize SR referencing
- reference SRs by names where possible, not by numbers;
- get rid of __stringify around SR names where possible;
- remove unneeded SR names from asm/regs.h;
- add SREG_ prefix to remaining SR names;

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
Committed by: Chris Zankel
Parent: f4349b6e01
Commit: bc5378fcba
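The pattern repeated throughout the diff below: special registers (SRs) that were referenced through numeric #defines in asm/regs.h and spliced into inline assembly with __stringify() are now referenced by the lowercase names the assembler resolves itself. A minimal sketch of the before/after idiom, distilled from the irqflags.h hunk (the #define value shown is the one removed from regs.h):

	/* before: SR known only by number, spliced in via __stringify() */
	#define PS 230
	asm volatile("rsr %0,"__stringify(PS) : "=a" (flags));

	/* after: the assembler resolves the SR name directly */
	asm volatile("rsr %0, ps" : "=a" (flags));

Where arithmetic on the SR number is still required (e.g. SREG_DBREAKC + _index, SREG_CCOMPARE + _index, SREG_EXCSAVE + XCHAL_DEBUGLEVEL), the numeric defines survive under the SREG_ prefix so they cannot collide with the assembler-level names.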
@@ -51,17 +51,17 @@ _start:

 	/* 'reset' window registers */

 	movi a4, 1
-	wsr a4, PS
+	wsr a4, ps
 	rsync

-	rsr a5, WINDOWBASE
+	rsr a5, windowbase
 	ssl a5
 	sll a4, a4
-	wsr a4, WINDOWSTART
+	wsr a4, windowstart
 	rsync

 	movi a4, 0x00040000
-	wsr a4, PS
+	wsr a4, ps
 	rsync

 	/* copy the loader to its address
@@ -73,7 +73,7 @@ static inline void atomic_add(int i, atomic_t * v)
 	"l32i %0, %2, 0 \n\t"
 	"add %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -97,7 +97,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	"l32i %0, %2, 0 \n\t"
 	"sub %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -118,7 +118,7 @@ static inline int atomic_add_return(int i, atomic_t * v)
 	"l32i %0, %2, 0 \n\t"
 	"add %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -137,7 +137,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 	"l32i %0, %2, 0 \n\t"
 	"sub %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -260,7 +260,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	"xor %1, %4, %3 \n\t"
 	"and %0, %0, %4 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval), "=a" (mask)
 	: "a" (v), "a" (all_f), "1" (mask)
@@ -277,7 +277,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	"l32i %0, %2, 0 \n\t"
 	"or %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (mask), "a" (v)
@@ -165,7 +165,7 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 static inline u32 xtensa_get_cacheattr(void)
 {
 	u32 r;
-	asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
+	asm volatile(" rsr %0, cacheattr" : "=a"(r));
 	return r;
 }

@@ -27,7 +27,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 	"bne %0, %2, 1f \n\t"
 	"s32i %3, %1, 0 \n\t"
 	"1: \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n\t"
 	: "=&a" (old)
 	: "a" (p), "a" (old), "r" (new)
@@ -97,7 +97,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
 	"l32i %0, %1, 0 \n\t"
 	"s32i %2, %1, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n\t"
 	: "=&a" (tmp)
 	: "a" (m), "a" (val)
@@ -94,11 +94,10 @@
 #if XCHAL_HAVE_CP

 #define RSR_CPENABLE(x) do { \
-	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
+	__asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
 	} while(0);
 #define WSR_CPENABLE(x) do { \
-	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \
-	    :: "a" (x)); \
+	__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
 	} while(0);

 #endif /* XCHAL_HAVE_CP */
@@ -27,7 +27,7 @@ static inline void __delay(unsigned long loops)
 static __inline__ u32 xtensa_get_ccount(void)
 {
 	u32 ccount;
-	asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
+	asm volatile ("rsr %0, ccount\n" : "=r" (ccount));
 	return ccount;
 }

@@ -16,7 +16,7 @@
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
-	asm volatile("rsr %0,"__stringify(PS) : "=a" (flags));
+	asm volatile("rsr %0, ps" : "=a" (flags));
 	return flags;
 }

@@ -41,7 +41,7 @@ static inline void arch_local_irq_enable(void)

 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile("wsr %0, "__stringify(PS)" ; rsync"
+	asm volatile("wsr %0, ps; rsync"
 		:: "a" (flags) : "memory");
 }

@@ -51,14 +51,14 @@ extern unsigned long asid_cache;

 static inline void set_rasid_register (unsigned long val)
 {
-	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
+	__asm__ __volatile__ (" wsr %0, rasid\n\t"
 	" isync\n" : : "a" (val));
 }

 static inline unsigned long get_rasid_register (void)
 {
 	unsigned long tmp;
-	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
+	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
 	return tmp;
 }
@@ -27,52 +27,15 @@

 /* Special registers. */

-#define LBEG 0
-#define LEND 1
-#define LCOUNT 2
-#define SAR 3
-#define BR 4
-#define SCOMPARE1 12
-#define ACCHI 16
-#define ACCLO 17
-#define MR 32
-#define WINDOWBASE 72
-#define WINDOWSTART 73
-#define PTEVADDR 83
-#define RASID 90
-#define ITLBCFG 91
-#define DTLBCFG 92
-#define IBREAKENABLE 96
-#define DDR 104
-#define IBREAKA 128
-#define DBREAKA 144
-#define DBREAKC 160
-#define EPC 176
-#define EPC_1 177
-#define DEPC 192
-#define EPS 192
-#define EPS_1 193
-#define EXCSAVE 208
-#define EXCSAVE_1 209
-#define INTERRUPT 226
-#define INTENABLE 228
-#define PS 230
-#define THREADPTR 231
-#define EXCCAUSE 232
-#define DEBUGCAUSE 233
-#define CCOUNT 234
-#define PRID 235
-#define ICOUNT 236
-#define ICOUNTLEVEL 237
-#define EXCVADDR 238
-#define CCOMPARE 240
-#define MISC_SR 244
-
-/* Special names for read-only and write-only interrupt registers. */
-
-#define INTREAD 226
-#define INTSET 226
-#define INTCLEAR 227
+#define SREG_MR 32
+#define SREG_IBREAKA 128
+#define SREG_DBREAKA 144
+#define SREG_DBREAKC 160
+#define SREG_EPC 176
+#define SREG_EPS 192
+#define SREG_EXCSAVE 208
+#define SREG_CCOMPARE 240
+#define SREG_MISC 244

 /* EXCCAUSE register fields */

@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time;
  * Register access.
  */

-#define WSR_CCOUNT(r) asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r))
-#define RSR_CCOUNT(r) asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r))
-#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
-#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
+#define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r))
+#define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r))
+#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))

 static inline unsigned long get_ccount (void)
 {
@@ -86,26 +86,26 @@ static inline void invalidate_dtlb_entry_no_isync (unsigned entry)

 static inline void set_itlbcfg_register (unsigned long val)
 {
-	__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
+	__asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
 		: : "a" (val));
 }

 static inline void set_dtlbcfg_register (unsigned long val)
 {
-	__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
+	__asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
 		: : "a" (val));
 }

 static inline void set_ptevaddr_register (unsigned long val)
 {
-	__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
+	__asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
 		: : "a" (val));
 }

 static inline unsigned long read_ptevaddr_register (void)
 {
 	unsigned long tmp;
-	__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
+	__asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
 	return tmp;
 }
@@ -170,15 +170,15 @@ ENTRY(fast_unaligned)
 	s32i a7, a2, PT_AREG7
 	s32i a8, a2, PT_AREG8

-	rsr a0, DEPC
-	xsr a3, EXCSAVE_1
+	rsr a0, depc
+	xsr a3, excsave1
 	s32i a0, a2, PT_AREG2
 	s32i a3, a2, PT_AREG3

 	/* Keep value of SAR in a0 */

-	rsr a0, SAR
-	rsr a8, EXCVADDR	# load unaligned memory address
+	rsr a0, sar
+	rsr a8, excvaddr	# load unaligned memory address

 	/* Now, identify one of the following load/store instructions.
 	 *
@@ -197,7 +197,7 @@ ENTRY(fast_unaligned)

 	/* Extract the instruction that caused the unaligned access. */

-	rsr a7, EPC_1	# load exception address
+	rsr a7, epc1	# load exception address
 	movi a3, ~3
 	and a3, a3, a7	# mask lower bits

@@ -275,16 +275,16 @@ ENTRY(fast_unaligned)
1:

 #if XCHAL_HAVE_LOOPS
-	rsr a5, LEND	# check if we reached LEND
+	rsr a5, lend	# check if we reached LEND
 	bne a7, a5, 1f
-	rsr a5, LCOUNT	# and LCOUNT != 0
+	rsr a5, lcount	# and LCOUNT != 0
 	beqz a5, 1f
 	addi a5, a5, -1	# decrement LCOUNT and set
-	rsr a7, LBEG	# set PC to LBEGIN
-	wsr a5, LCOUNT
+	rsr a7, lbeg	# set PC to LBEGIN
+	wsr a5, lcount
 #endif

-1:	wsr a7, EPC_1	# skip load instruction
+1:	wsr a7, epc1	# skip load instruction
 	extui a4, a4, INSN_T, 4	# extract target register
 	movi a5, .Lload_table
 	addx8 a4, a4, a5
@@ -355,16 +355,16 @@ ENTRY(fast_unaligned)

1:
 #if XCHAL_HAVE_LOOPS
-	rsr a4, LEND	# check if we reached LEND
+	rsr a4, lend	# check if we reached LEND
 	bne a7, a4, 1f
-	rsr a4, LCOUNT	# and LCOUNT != 0
+	rsr a4, lcount	# and LCOUNT != 0
 	beqz a4, 1f
 	addi a4, a4, -1	# decrement LCOUNT and set
-	rsr a7, LBEG	# set PC to LBEGIN
-	wsr a4, LCOUNT
+	rsr a7, lbeg	# set PC to LBEGIN
+	wsr a4, lcount
 #endif

-1:	wsr a7, EPC_1	# skip store instruction
+1:	wsr a7, epc1	# skip store instruction
 	movi a4, ~3
 	and a4, a4, a8	# align memory address

@@ -406,7 +406,7 @@ ENTRY(fast_unaligned)

.Lexit:
 	movi a4, 0
-	rsr a3, EXCSAVE_1
+	rsr a3, excsave1
 	s32i a4, a3, EXC_TABLE_FIXUP

 	/* Restore working register */
@@ -420,7 +420,7 @@ ENTRY(fast_unaligned)

 	/* restore SAR and return */

-	wsr a0, SAR
+	wsr a0, sar
 	l32i a0, a2, PT_AREG0
 	l32i a2, a2, PT_AREG2
 	rfe
@@ -438,10 +438,10 @@ ENTRY(fast_unaligned)
 	l32i a6, a2, PT_AREG6
 	l32i a5, a2, PT_AREG5
 	l32i a4, a2, PT_AREG4
-	wsr a0, SAR
+	wsr a0, sar
 	mov a1, a2

-	rsr a0, PS
+	rsr a0, ps
 	bbsi.l a2, PS_UM_BIT, 1f	# jump if user mode

 	movi a0, _kernel_exception
@@ -43,7 +43,7 @@
 /* IO protection is currently unsupported. */

ENTRY(fast_io_protect)
-	wsr a0, EXCSAVE_1
+	wsr a0, excsave1
 	movi a0, unrecoverable_exception
 	callx0 a0

@@ -220,7 +220,7 @@ ENTRY(coprocessor_restore)
 */

ENTRY(fast_coprocessor_double)
-	wsr a0, EXCSAVE_1
+	wsr a0, excsave1
 	movi a0, unrecoverable_exception
 	callx0 a0

@@ -229,13 +229,13 @@ ENTRY(fast_coprocessor)

 	/* Save remaining registers a1-a3 and SAR */

-	xsr a3, EXCSAVE_1
+	xsr a3, excsave1
 	s32i a3, a2, PT_AREG3
-	rsr a3, SAR
+	rsr a3, sar
 	s32i a1, a2, PT_AREG1
 	s32i a3, a2, PT_SAR
 	mov a1, a2
-	rsr a2, DEPC
+	rsr a2, depc
 	s32i a2, a1, PT_AREG2

 	/*
@@ -248,17 +248,17 @@ ENTRY(fast_coprocessor)

 	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

-	rsr a3, EXCCAUSE
+	rsr a3, exccause
 	addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED

 	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/

 	ssl a3			# SAR: 32 - coprocessor_number
 	movi a2, 1
-	rsr a0, CPENABLE
+	rsr a0, cpenable
 	sll a2, a2
 	or a0, a0, a2
-	wsr a0, CPENABLE
+	wsr a0, cpenable
 	rsync

 	/* Retrieve previous owner. (a3 still holds CP number) */
@@ -291,7 +291,7 @@ ENTRY(fast_coprocessor)

 	/* Note that only a0 and a1 were preserved. */

-2:	rsr a3, EXCCAUSE
+2:	rsr a3, exccause
 	addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
 	movi a0, coprocessor_owner
 	addx4 a0, a3, a0
@@ -321,7 +321,7 @@ ENTRY(fast_coprocessor)
 	l32i a0, a1, PT_SAR
 	l32i a3, a1, PT_AREG3
 	l32i a2, a1, PT_AREG2
-	wsr a0, SAR
+	wsr a0, sar
 	l32i a0, a1, PT_AREG0
 	l32i a1, a1, PT_AREG1

(+126 −126: diff for one file suppressed because it is too large)
@@ -61,18 +61,18 @@ _startup:
 	/* Disable interrupts and exceptions. */

 	movi a0, LOCKLEVEL
-	wsr a0, PS
+	wsr a0, ps

 	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */

-	wsr a2, EXCSAVE_1
+	wsr a2, excsave1

 	/* Start with a fresh windowbase and windowstart. */

 	movi a1, 1
 	movi a0, 0
-	wsr a1, WINDOWSTART
-	wsr a0, WINDOWBASE
+	wsr a1, windowstart
+	wsr a0, windowbase
 	rsync

 	/* Set a0 to 0 for the remaining initialization. */
@@ -82,46 +82,46 @@ _startup:
 	/* Clear debugging registers. */

 #if XCHAL_HAVE_DEBUG
-	wsr a0, IBREAKENABLE
-	wsr a0, ICOUNT
+	wsr a0, ibreakenable
+	wsr a0, icount
 	movi a1, 15
-	wsr a0, ICOUNTLEVEL
+	wsr a0, icountlevel

 	.set _index, 0
 	.rept XCHAL_NUM_DBREAK - 1
-	wsr a0, DBREAKC + _index
+	wsr a0, SREG_DBREAKC + _index
 	.set _index, _index + 1
 	.endr
 #endif

 	/* Clear CCOUNT (not really necessary, but nice) */

-	wsr a0, CCOUNT	# not really necessary, but nice
+	wsr a0, ccount	# not really necessary, but nice

 	/* Disable zero-loops. */

 #if XCHAL_HAVE_LOOPS
-	wsr a0, LCOUNT
+	wsr a0, lcount
 #endif

 	/* Disable all timers. */

 	.set _index, 0
 	.rept XCHAL_NUM_TIMERS - 1
-	wsr a0, CCOMPARE + _index
+	wsr a0, SREG_CCOMPARE + _index
 	.set _index, _index + 1
 	.endr

 	/* Interrupt initialization. */

 	movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
-	wsr a0, INTENABLE
-	wsr a2, INTCLEAR
+	wsr a0, intenable
+	wsr a2, intclear

 	/* Disable coprocessors. */

 #if XCHAL_CP_NUM > 0
-	wsr a0, CPENABLE
+	wsr a0, cpenable
 #endif

 	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
@@ -132,7 +132,7 @@ _startup:
 	 */

 	movi a1, 1
-	wsr a1, PS
+	wsr a1, ps
 	rsync

 	/* Initialize the caches.
@@ -206,18 +206,18 @@ _startup:
 	addi a1, a1, KERNEL_STACK_SIZE

 	movi a2, 0x00040001	# WOE=1, INTLEVEL=1, UM=0
-	wsr a2, PS		# (enable reg-windows; progmode stack)
+	wsr a2, ps		# (enable reg-windows; progmode stack)
 	rsync

 	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/

 	movi a2, debug_exception
-	wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
+	wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL

 	/* Set up EXCSAVE[1] to point to the exc_table. */

 	movi a6, exc_table
-	xsr a6, EXCSAVE_1
+	xsr a6, excsave1

 	/* init_arch kick-starts the linux kernel */

@@ -72,13 +72,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 static void xtensa_irq_mask(struct irq_data *d)
 {
 	cached_irq_mask &= ~(1 << d->irq);
-	set_sr (cached_irq_mask, INTENABLE);
+	set_sr (cached_irq_mask, intenable);
 }

 static void xtensa_irq_unmask(struct irq_data *d)
 {
 	cached_irq_mask |= 1 << d->irq;
-	set_sr (cached_irq_mask, INTENABLE);
+	set_sr (cached_irq_mask, intenable);
 }

 static void xtensa_irq_enable(struct irq_data *d)
@@ -95,7 +95,7 @@ static void xtensa_irq_disable(struct irq_data *d)

 static void xtensa_irq_ack(struct irq_data *d)
 {
-	set_sr(1 << d->irq, INTCLEAR);
+	set_sr(1 << d->irq, intclear);
 }

 static int xtensa_irq_retrigger(struct irq_data *d)
@@ -202,8 +202,8 @@ extern void do_IRQ(int, struct pt_regs *);

 void do_interrupt (struct pt_regs *regs)
 {
-	unsigned long intread = get_sr (INTREAD);
-	unsigned long intenable = get_sr (INTENABLE);
+	unsigned long intread = get_sr (interrupt);
+	unsigned long intenable = get_sr (intenable);
 	int i, mask;

 	/* Handle all interrupts (no priorities).
@@ -213,7 +213,7 @@ void do_interrupt (struct pt_regs *regs)

 	for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
 		if (mask & (intread & intenable)) {
-			set_sr (mask, INTCLEAR);
+			set_sr (mask, intclear);
 			do_IRQ (i,regs);
 		}
 	}
@@ -339,7 +339,7 @@ void __init trap_init(void)
 	/* Initialize EXCSAVE_1 to hold the address of the exception table. */

 	i = (unsigned long)exc_table;
-	__asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
+	__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i));
 }

 /*
@@ -386,16 +386,16 @@ static inline void spill_registers(void)
 	unsigned int a0, ps;

 	__asm__ __volatile__ (
-		"movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
+		"movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
 		"mov a12, a0\n\t"
-		"rsr a13," __stringify(SAR) "\n\t"
-		"xsr a14," __stringify(PS) "\n\t"
+		"rsr a13, sar\n\t"
+		"xsr a14, ps\n\t"
 		"movi a0, _spill_registers\n\t"
 		"rsync\n\t"
 		"callx0 a0\n\t"
 		"mov a0, a12\n\t"
-		"wsr a13," __stringify(SAR) "\n\t"
-		"wsr a14," __stringify(PS) "\n\t"
+		"wsr a13, sar\n\t"
+		"wsr a14, ps\n\t"
 		:: "a" (&a0), "a" (&ps)
 		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
 }
@@ -69,11 +69,11 @@

ENTRY(_UserExceptionVector)

-	xsr a3, EXCSAVE_1	# save a3 and get dispatch table
-	wsr a2, DEPC		# save a2
+	xsr a3, excsave1	# save a3 and get dispatch table
+	wsr a2, depc		# save a2
 	l32i a2, a3, EXC_TABLE_KSTK	# load kernel stack to a2
 	s32i a0, a2, PT_AREG0	# save a0 to ESF
-	rsr a0, EXCCAUSE	# retrieve exception cause
+	rsr a0, exccause	# retrieve exception cause
 	s32i a0, a2, PT_DEPC	# mark it as a regular exception
 	addx4 a0, a0, a3	# find entry in table
 	l32i a0, a0, EXC_TABLE_FAST_USER	# load handler
@@ -93,11 +93,11 @@ ENTRY(_UserExceptionVector)

ENTRY(_KernelExceptionVector)

-	xsr a3, EXCSAVE_1	# save a3, and get dispatch table
-	wsr a2, DEPC		# save a2
+	xsr a3, excsave1	# save a3, and get dispatch table
+	wsr a2, depc		# save a2
 	addi a2, a1, -16-PT_SIZE	# adjust stack pointer
 	s32i a0, a2, PT_AREG0	# save a0 to ESF
-	rsr a0, EXCCAUSE	# retrieve exception cause
+	rsr a0, exccause	# retrieve exception cause
 	s32i a0, a2, PT_DEPC	# mark it as a regular exception
 	addx4 a0, a0, a3	# find entry in table
 	l32i a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
@@ -205,17 +205,17 @@ ENTRY(_DoubleExceptionVector)

 	/* Deliberately destroy excsave (don't assume it's value was valid). */

-	wsr a3, EXCSAVE_1	# save a3
+	wsr a3, excsave1	# save a3

 	/* Check for kernel double exception (usually fatal). */

-	rsr a3, PS
+	rsr a3, ps
 	_bbci.l a3, PS_UM_BIT, .Lksp

 	/* Check if we are currently handling a window exception. */
 	/* Note: We don't need to indicate that we enter a critical section. */

-	xsr a0, DEPC		# get DEPC, save a0
+	xsr a0, depc		# get DEPC, save a0

 	movi a3, XCHAL_WINDOW_VECTORS_VADDR
 	_bltu a0, a3, .Lfixup
@@ -243,21 +243,21 @@ ENTRY(_DoubleExceptionVector)
 	 * Note: We can trash the current window frame (a0...a3) and depc!
 	 */

-	wsr a2, DEPC		# save stack pointer temporarily
-	rsr a0, PS
+	wsr a2, depc		# save stack pointer temporarily
+	rsr a0, ps
 	extui a0, a0, PS_OWB_SHIFT, 4
-	wsr a0, WINDOWBASE
+	wsr a0, windowbase
 	rsync

 	/* We are now in the previous window frame. Save registers again. */

-	xsr a2, DEPC		# save a2 and get stack pointer
+	xsr a2, depc		# save a2 and get stack pointer
 	s32i a0, a2, PT_AREG0

-	wsr a3, EXCSAVE_1	# save a3
+	wsr a3, excsave1	# save a3
 	movi a3, exc_table

-	rsr a0, EXCCAUSE
+	rsr a0, exccause
 	s32i a0, a2, PT_DEPC	# mark it as a regular exception
 	addx4 a0, a0, a3
 	l32i a0, a0, EXC_TABLE_FAST_USER
@@ -290,14 +290,14 @@ ENTRY(_DoubleExceptionVector)

 	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */

-	xsr a3, DEPC
+	xsr a3, depc
 	s32i a0, a2, PT_DEPC
 	s32i a3, a2, PT_AREG0

 	/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */

 	movi a3, exc_table
-	rsr a0, EXCCAUSE
+	rsr a0, exccause
 	addx4 a0, a0, a3
 	l32i a0, a0, EXC_TABLE_FAST_USER
 	jx a0
@@ -312,7 +312,7 @@ ENTRY(_DoubleExceptionVector)

.Lksp:	/* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */

-	rsr a3, EXCCAUSE
+	rsr a3, exccause
 	beqi a3, EXCCAUSE_ITLB_MISS, 1f
 	addi a3, a3, -EXCCAUSE_DTLB_MISS
 	bnez a3, .Lunrecoverable
@@ -328,11 +328,11 @@ ENTRY(_DoubleExceptionVector)

.Lunrecoverable_fixup:
 	l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
-	xsr a0, DEPC
+	xsr a0, depc

.Lunrecoverable:
-	rsr a3, EXCSAVE_1
-	wsr a0, EXCSAVE_1
+	rsr a3, excsave1
+	wsr a0, excsave1
 	movi a0, unrecoverable_exception
 	callx0 a0

@@ -349,7 +349,7 @@ ENTRY(_DoubleExceptionVector)
 	.section .DebugInterruptVector.text, "ax"

ENTRY(_DebugInterruptVector)
-	xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL
+	xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
 	jx a0

@@ -61,13 +61,13 @@ void platform_restart(void)
 	 * jump to the reset vector. */

 	__asm__ __volatile__("movi a2, 15\n\t"
-		"wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
+		"wsr a2, icountlevel\n\t"
 		"movi a2, 0\n\t"
-		"wsr a2, " __stringify(ICOUNT) "\n\t"
-		"wsr a2, " __stringify(IBREAKENABLE) "\n\t"
-		"wsr a2, " __stringify(LCOUNT) "\n\t"
+		"wsr a2, icount\n\t"
+		"wsr a2, ibreakenable\n\t"
+		"wsr a2, lcount\n\t"
 		"movi a2, 0x1f\n\t"
-		"wsr a2, " __stringify(PS) "\n\t"
+		"wsr a2, ps\n\t"
 		"isync\n\t"
 		"jx %0\n\t"
 		:
@@ -66,13 +66,13 @@ void platform_restart(void)
 	 * jump to the reset vector. */

 	__asm__ __volatile__ ("movi a2, 15\n\t"
-		"wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
+		"wsr a2, icountlevel\n\t"
 		"movi a2, 0\n\t"
-		"wsr a2, " __stringify(ICOUNT) "\n\t"
-		"wsr a2, " __stringify(IBREAKENABLE) "\n\t"
-		"wsr a2, " __stringify(LCOUNT) "\n\t"
+		"wsr a2, icount\n\t"
+		"wsr a2, ibreakenable\n\t"
+		"wsr a2, lcount\n\t"
 		"movi a2, 0x1f\n\t"
-		"wsr a2, " __stringify(PS) "\n\t"
+		"wsr a2, ps\n\t"
 		"isync\n\t"
 		"jx %0\n\t"
 		: