MIPS: MT: Remove SMTC support

Nobody is maintaining SMTC anymore and there also seems to be no user
base.  Which is a pity - the SMTC technology, primarily developed by
Kevin D. Kissell <kevink@paralogos.com>, is an ingenious demonstration
of the MT ASE's power and elegance.

Based on Markos Chandras' <Markos.Chandras@imgtec.com> patch
https://patchwork.linux-mips.org/patch/6719/ which, while very similar,
no longer applied cleanly when I tried to merge it, plus some
additional post-SMTC cleanup - SMTC was a feature as tricky to remove
as it once was to merge.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
commit b633648c5a (parent 8b2e62cc34)
Author: Ralf Baechle
Date: 2014-05-23 16:29:44 +02:00

64 changed files with 72 additions and 4097 deletions


@@ -1852,7 +1852,7 @@ config FORCE_MAX_ZONEORDER
config CEVT_GIC
bool "Use GIC global counter for clock events"
depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
depends on IRQ_GIC && !MIPS_SEAD3
help
Use the GIC global counter for the clock events. The R4K clock
event driver is always present, so if the platform ends up not
@@ -1936,24 +1936,6 @@ config MIPS_MT_SMP
Intel Hyperthreading feature. For further information go to
<http://www.imgtec.com/mips/mips-multithreading.asp>.
config MIPS_MT_SMTC
bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
depends on CPU_MIPS32_R2
depends on SYS_SUPPORTS_MULTITHREADING
depends on !MIPS_CPS
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
select SMP
select SMP_UP
select SYS_SUPPORTS_SMP
select NR_CPUS_DEFAULT_8
help
This is a kernel model which is known as SMTC. This is
supported on cores with the MT ASE and presents all TCs
available on all VPEs to support SMP. For further
information see <http://www.linux-mips.org/wiki/34K#SMTC>.
endchoice
config MIPS_MT
@@ -1977,7 +1959,7 @@ config SYS_SUPPORTS_MULTITHREADING
config MIPS_MT_FPAFF
bool "Dynamic FPU affinity for FP-intensive threads"
default y
depends on MIPS_MT_SMP || MIPS_MT_SMTC
depends on MIPS_MT_SMP
config MIPS_VPE_LOADER
bool "VPE loader support."
@@ -1999,29 +1981,6 @@ config MIPS_VPE_LOADER_MT
default "y"
depends on MIPS_VPE_LOADER && !MIPS_CMP
config MIPS_MT_SMTC_IM_BACKSTOP
bool "Use per-TC register bits as backstop for inhibited IM bits"
depends on MIPS_MT_SMTC
default n
help
To support multiple TC microthreads acting as "CPUs" within
a VPE, VPE-wide interrupt mask bits must be specially manipulated
during interrupt handling. To support legacy drivers and interrupt
controller management code, SMTC has a "backstop" to track and
if necessary restore the interrupt mask. This has some performance
impact on interrupt service overhead.
config MIPS_MT_SMTC_IRQAFF
bool "Support IRQ affinity API"
depends on MIPS_MT_SMTC
default n
help
Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
for SMTC Linux kernel. Requires platform support, of which
an example can be found in the MIPS kernel i8259 and Malta
platform code. Adds some overhead to interrupt dispatch, and
should be used only if you know what you are doing.
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
depends on MIPS_VPE_LOADER
@@ -2049,7 +2008,7 @@ config MIPS_VPE_APSP_API_MT
config MIPS_CMP
bool "MIPS CMP framework support (DEPRECATED)"
depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC
depends on SYS_SUPPORTS_MIPS_CMP
select MIPS_GIC_IPI
select SYNC_R4K
select WEAK_ORDERING
@@ -2256,7 +2215,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
default y
help
Enable hardware performance counter support for perf events. If


@@ -79,15 +79,6 @@ config CMDLINE_OVERRIDE
Normally, you will choose 'N' here.
config SMTC_IDLE_HOOK_DEBUG
bool "Enable additional debug checks before going into CPU idle loop"
depends on DEBUG_KERNEL && MIPS_MT_SMTC
help
This option enables Enable additional debug checks before going into
CPU idle loop. For details on these checks, see
arch/mips/kernel/smtc.c. This debugging option result in significant
overhead so should be disabled in production kernels.
config SB1XXX_CORELIS
bool "Corelis Debugger"
depends on SIBYTE_SB1xxx_SOC


@@ -1,196 +0,0 @@
CONFIG_MIPS_MALTA=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_PAGE_SIZE_16KB=y
CONFIG_MIPS_MT_SMTC=y
# CONFIG_MIPS_MT_FPAFF is not set
CONFIG_NR_CPUS=9
CONFIG_HZ_48=y
CONFIG_LOCALVERSION="smtc"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
# CONFIG_IDE_PROC_FS is not set
# CONFIG_IDEPCI_PCIBUS_ORDER is not set
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_TOSHIBA is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_WLAN is not set
# CONFIG_VT is not set
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_G=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_IDE_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set


@@ -17,26 +17,8 @@
#ifdef CONFIG_64BIT
#include <asm/asmmacro-64.h>
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif
#ifdef CONFIG_MIPS_MT_SMTC
.macro local_irq_enable reg=t0
mfc0 \reg, CP0_TCSTATUS
ori \reg, \reg, TCSTATUS_IXMT
xori \reg, \reg, TCSTATUS_IXMT
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
.macro local_irq_disable reg=t0
mfc0 \reg, CP0_TCSTATUS
ori \reg, \reg, TCSTATUS_IXMT
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
#elif defined(CONFIG_CPU_MIPSR2)
#ifdef CONFIG_CPU_MIPSR2
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
@@ -71,7 +53,7 @@
sw \reg, TI_PRE_COUNT($28)
#endif
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
#endif /* CONFIG_CPU_MIPSR2 */
.macro fpu_save_16even thread tmp=t0
cfc1 \tmp, fcr31


@@ -65,17 +65,12 @@ struct cpuinfo_mips {
#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
#endif
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
#ifdef CONFIG_MIPS_MT_SMP
/*
* In the MIPS MT "SMTC" model, each TC is considered
* to be a "CPU" for the purposes of scheduling, but
* exception resources, ASID spaces, etc, are common
* to all TCs within the same VPE.
* There is not necessarily a 1:1 mapping of VPE num to CPU number
* in particular on multi-core systems.
*/
int vpe_id; /* Virtual Processor number */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
int tc_id; /* Thread Context number */
#endif
void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
unsigned long n;
};
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
#ifdef CONFIG_MIPS_MT_SMP
# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
#else
# define cpu_vpe_id(cpuinfo) 0


@@ -48,11 +48,7 @@
enum fixed_addresses {
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
#ifdef CONFIG_MIPS_MT_SMTC
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
#else
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
#endif
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
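
The saving is easy to quantify. A small illustrative calculation (not
kernel code), using FIX_N_COLOURS from the hunk above and the NR_CPUS=9
value from the deleted SMTC defconfig shown earlier:

/* Illustrative arithmetic only: mirrors the two FIX_CMAP_END formulas. */
#include <stdio.h>

#define FIX_N_COLOURS 8
#define NR_CPUS 9				/* from the deleted defconfig */

int main(void)
{
	int smtc_slots  = FIX_N_COLOURS * NR_CPUS * 2;	/* 144 slots */
	int plain_slots = FIX_N_COLOURS * 2;		/* 16 slots */

	printf("CMAP fixmap slots: SMTC %d, plain %d\n",
	       smtc_slots, plain_slots);
	return 0;
}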


@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
struct irqaction;
extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
unsigned long hwmask);
static inline void smtc_im_ack_irq(unsigned int irq)
{
if (irq_hwmask[irq] & ST0_IM)
set_c0_status(irq_hwmask[irq] & ST0_IM);
}
#else
static inline void smtc_im_ack_irq(unsigned int irq)
{
}
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>
extern int plat_set_irq_affinity(struct irq_data *d,
const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);
/*
* IRQ affinity hook invoked at the beginning of interrupt dispatch
* if option is enabled.
*
* Up through Linux 2.6.22 (at least) cpumask operations are very
* inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
* used a "fast path" per-IRQ-descriptor cache of affinity information
* to reduce latency. As there is a project afoot to optimize the
* cpumask implementations, this version is optimistically assuming
* that cpumask.h macro overhead is reasonable during interrupt dispatch.
*/
static inline int handle_on_other_cpu(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
if (cpumask_test_cpu(smp_processor_id(), d->affinity))
return 0;
smtc_forward_irq(d);
return 1;
}
#else /* Not doing SMTC affinity */
static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
static inline void smtc_im_backstop(unsigned int irq)
{
if (irq_hwmask[irq] & 0x0000ff00)
write_c0_tccontext(read_c0_tccontext() &
~(irq_hwmask[irq] & 0x0000ff00));
}
/*
* Clear interrupt mask handling "backstop" if irq_hwmask
* entry so indicates. This implies that the ack() or end()
* functions will take over re-enabling the low-level mask.
* Otherwise it will be done on return from exception.
*/
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
int ret = handle_on_other_cpu(irq);
if (!ret)
smtc_im_backstop(irq);
return ret;
}
#else
static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
return handle_on_other_cpu(irq);
}
#endif
extern void do_IRQ(unsigned int irq);
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
extern void do_IRQ_no_affinity(unsigned int irq);
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
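
For context on how these hooks were consumed: the dispatch path called
the affinity hook first and fell back to normal handling, roughly as
sketched below (illustrative, not a verbatim copy of the kernel's
do_IRQ):

/*
 * Sketch of the pre-removal dispatch flow, assuming the helpers
 * declared above (the real do_IRQ also did stack checking and
 * accounting):
 */
void do_IRQ(unsigned int irq)
{
	irq_enter();
	/* Forward to the TC the IRQ is bound to, or clear the IM backstop. */
	if (!smtc_handle_on_other_cpu(irq))
		generic_handle_irq(irq);
	irq_exit();
}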


@@ -17,7 +17,7 @@
#include <linux/stringify.h>
#include <asm/hazards.h>
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
#ifdef CONFIG_CPU_MIPSR2
static inline void arch_local_irq_disable(void)
{
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
extern void smtc_ipi_replay(void);
#endif /* CONFIG_CPU_MIPSR2 */
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of call overhead on each local_irq_enable()
*/
smtc_ipi_replay();
#endif
__asm__ __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
#if defined(CONFIG_CPU_MIPSR2)
" ei \n"
#else
" mfc0 $1,$12 \n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
asm __volatile__(
" .set push \n"
" .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 %[flags], $2, 1 \n"
#else
" mfc0 %[flags], $12 \n"
#endif
" .set pop \n"
: [flags] "=r" (flags));
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
*/
return flags & 0x400;
#else
return !(flags & 1);
#endif
}
#endif /* #ifndef __ASSEMBLY__ */
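
The non-R2 fallback that the hunk above truncates after "mfc0 $1,$12"
is the classic Status.IE read-modify-write; a sketch of the remaining
sequence, under the kernel's usual inline-asm framing:

/* Sketch of the surviving pre-R2 enable path: set Status.IE (bit 0)
 * via read-modify-write. The ori/xori pair is the MIPS idiom for
 * setting a group of bits and then clearing all of them except IE. */
__asm__ __volatile__(
	"	mfc0	$1, $12		\n"	/* read CP0 Status	*/
	"	ori	$1, 0x1f	\n"	/* set IE and mask bits */
	"	xori	$1, 0x1e	\n"	/* clear all but IE	*/
	"	mtc0	$1, $12		\n"	/* write CP0 Status	*/
	: /* no outputs */ : /* no inputs */ : "memory");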


@@ -80,36 +80,6 @@
.endm
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2
bnez t0, 0f
9:
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1: b 1b
__INITDATA
nonmt_processor:
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
#endif
#ifdef CONFIG_EVA
sync


@@ -10,37 +10,6 @@
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2
bnez t0, 0f
9 :
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1 : b 1b
__INITDATA
nonmt_processor :
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
0 :
#endif
.endm
/*


@@ -1,7 +1,6 @@
/*
* Definitions and decalrations for MIPS MT support
* that are common between SMTC, VSMP, and/or AP/SP
* kernel models.
* Definitions and decalrations for MIPS MT support that are common between
* the VSMP, and AP/SP kernel models.
*/
#ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H


@@ -1014,19 +1014,8 @@ do { \
#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
#define read_c0_status() __read_32bit_c0_register($12, 0)
#ifdef CONFIG_MIPS_MT_SMTC
#define write_c0_status(val) \
do { \
__write_32bit_c0_register($12, 0, val); \
__ehb(); \
} while (0)
#else
/*
* Legacy non-SMTC code, which may be hazardous
* but which might not support EHB
*/
#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
#endif /* CONFIG_MIPS_MT_SMTC */
#define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
@@ -1750,11 +1739,6 @@ static inline void tlb_write_random(void)
/*
* Manipulate bits in a c0 register.
*/
#ifndef CONFIG_MIPS_MT_SMTC
/*
* SMTC Linux requires shutting-down microthread scheduling
* during CP0 register read-modify-write sequences.
*/
#define __BUILD_SET_C0(name) \
static inline unsigned int \
set_c0_##name(unsigned int set) \
@@ -1793,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \
return res; \
}
#else /* SMTC versions that manage MT scheduling */
#include <linux/irqflags.h>
/*
* This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
* header file recursion.
*/
static inline unsigned int __dmt(void)
{
int res;
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" .set noat \n"
" .word 0x41610BC1 # dmt $1 \n"
" ehb \n"
" move %0, $1 \n"
" .set pop \n"
: "=r" (res));
instruction_hazard();
return res;
}
#define __VPECONTROL_TE_SHIFT 15
#define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT)
#define __EMT_ENABLE __VPECONTROL_TE
static inline void __emt(unsigned int previous)
{
if ((previous & __EMT_ENABLE))
__asm__ __volatile__(
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
" .set mips0 \n");
}
static inline void __ehb(void)
{
__asm__ __volatile__(
" .set mips32r2 \n"
" ehb \n" " .set mips0 \n");
}
/*
* Note that local_irq_save/restore affect TC-specific IXMT state,
* not Status.IE as in non-SMTC kernel.
*/
#define __BUILD_SET_C0(name) \
static inline unsigned int \
set_c0_##name(unsigned int set) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
new = res | set; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
} \
\
static inline unsigned int \
clear_c0_##name(unsigned int clear) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
new = res & ~clear; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
} \
\
static inline unsigned int \
change_c0_##name(unsigned int change, unsigned int newbits) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
\
omt = __dmt(); \
res = read_c0_##name(); \
new = res & ~change; \
new |= (newbits & change); \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
}
#endif
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
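
With the SMTC variant gone, each __BUILD_SET_C0() invocation expands to
a plain read-modify-write with no DMT/EMT fencing. A sketch of the
surviving expansion for set_c0_status() (the macro body itself is
elided by the hunk above):

/* Sketch of what __BUILD_SET_C0(status) generates after this commit:
 * a plain read-modify-write; the old value is returned to the caller. */
static inline unsigned int set_c0_status(unsigned int set)
{
	unsigned int res, new;

	res = read_c0_status();
	new = res | set;
	write_c0_status(new);

	return res;
}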


@@ -18,10 +18,6 @@
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
@@ -63,13 +59,6 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x10
#define ASID_MASK 0xff0
#elif defined(CONFIG_MIPS_MT_SMTC)
#define ASID_INC 0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK (smtc_asid_mask)
#define HW_ASID_MASK 0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */
#define ASID_INC 0x1
@@ -92,7 +81,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
@@ -115,12 +103,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
#else /* CONFIG_MIPS_MT_SMTC */
#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Initialize the context related info for a new mm_struct
* instance.
@@ -141,46 +123,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
unsigned long mtflags;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
local_irq_save(flags);
mtflags = dvpe();
#else /* Not SMTC */
local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */
/* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/*
* If the EntryHi ASID being replaced happens to be
* the value flagged at ASID recycling time as having
* an extended life, clear the bit showing it being
* in use by this "CPU", and if that's the last bit,
* free up the ASID value for use and flush any old
* instances of it from the TLB.
*/
oldasid = (read_c0_entryhi() & ASID_MASK);
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/*
* Tread softly on EntryHi, and so long as we support
* having ASID_MASK smaller than the hardware maximum,
* make sure no "soft" bits become "hard"...
*/
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/*
@@ -213,34 +161,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
unsigned long flags;
unsigned int cpu = smp_processor_id();
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
unsigned long mtflags;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
mtflags = dvpe();
oldasid = read_c0_entryhi() & ASID_MASK;
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
@@ -258,48 +184,15 @@ static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
/* Can't use spinlock because called from TLB flush within DVPE */
unsigned int prevvpe;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
prevvpe = dvpe();
oldasid = (read_c0_entryhi() & ASID_MASK);
if (smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
| cpu_asid(cpu, mm));
ehb(); /* Make sure it propagates to TCStatus */
evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
} else {
/* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
cpu_context(cpu, mm) = 0;
#else /* SMTC */
int i;
/* SMTC shares the TLB (and ASIDs) across VPEs */
for_each_online_cpu(i) {
if((smtc_status & SMTC_TLB_SHARED)
|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = 0;
}
#endif /* CONFIG_MIPS_MT_SMTC */
}
local_irq_restore(flags);
}
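
For reference, the "classic" allocator that is now the only
get_new_mmu_context() is mostly elided by the hunks above; it bumps the
per-CPU ASID and starts a new version cycle on wrap, along these lines
(a sketch assuming the usual helpers):

/* Sketch of the surviving "classic" ASID allocator: bump the per-CPU
 * ASID; when the low ASID bits wrap, flush the TLB and begin a new
 * version so stale contexts are detected by ASID_VERSION_MASK above. */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();	/* virtually tagged I-cache */
		local_flush_tlb_all();		/* start a new version */
		if (!asid)
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}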


@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
#define MODULE_KERNEL_TYPE "64BIT "
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#define MODULE_KERNEL_SMTC "MT_SMTC "
#else
#define MODULE_KERNEL_SMTC ""
#endif
#define MODULE_ARCH_VERMAGIC \
MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
#endif /* _ASM_MODULE_H */


@@ -39,9 +39,6 @@ struct pt_regs {
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long cp0_tcstatus;
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
unsigned long long mpl[3]; /* MTM{0,1,2} */
unsigned long long mtp[3]; /* MTP{0,1,2} */


@@ -43,11 +43,10 @@
: "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT
/*
* Temporary hacks for SMTC debug. Optionally force single-threaded
* execution during I-cache flushes.
*/
/*
* Optionally force single-threaded execution during I-cache flushes.
*/
#define PROTECT_CACHE_FLUSHES 1
#ifdef PROTECT_CACHE_FLUSHES


@@ -1,78 +0,0 @@
#ifndef _ASM_SMTC_MT_H
#define _ASM_SMTC_MT_H
/*
* Definitions for SMTC multitasking on MIPS MT cores
*/
#include <asm/mips_mt.h>
#include <asm/smtc_ipi.h>
/*
* System-wide SMTC status information
*/
extern unsigned int smtc_status;
#define SMTC_TLB_SHARED 0x00000001
#define SMTC_MTC_ACTIVE 0x00000002
/*
* TLB/ASID Management information
*/
#define MAX_SMTC_TLBS 2
#define MAX_SMTC_ASIDS 256
#if NR_CPUS <= 8
typedef char asiduse;
#else
#if NR_CPUS <= 16
typedef short asiduse;
#else
typedef long asiduse;
#endif
#endif
/*
* VPE Management information
*/
#define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */
extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
struct mm_struct;
struct task_struct;
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
void self_ipi(struct smtc_ipi *);
void smtc_flush_tlb_asid(unsigned long asid);
extern int smtc_build_cpu_map(int startslot);
extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
extern void smtc_init_secondary(void);
/*
* Sharing the TLB between multiple VPEs means that the
* "random" index selection function is not allowed to
* select the current value of the Index register. To
* avoid additional TLB pressure, the Index registers
* are "parked" with an non-Valid value.
*/
#define PARKED_INDEX ((unsigned int)0x80000000)
/*
* Define low-level interrupt mask for IPIs, if necessary.
* By default, use SW interrupt 1, which requires no external
* hardware support, but which works only for single-core
* MIPS MT systems.
*/
#ifndef MIPS_CPU_IPI_IRQ
#define MIPS_CPU_IPI_IRQ 1
#endif
#endif /* _ASM_SMTC_MT_H */


@@ -1,129 +0,0 @@
/*
* Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
*/
#ifndef __ASM_SMTC_IPI_H
#define __ASM_SMTC_IPI_H
#include <linux/spinlock.h>
//#define SMTC_IPI_DEBUG
#ifdef SMTC_IPI_DEBUG
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#endif /* SMTC_IPI_DEBUG */
/*
* An IPI "message"
*/
struct smtc_ipi {
struct smtc_ipi *flink;
int type;
void *arg;
int dest;
#ifdef SMTC_IPI_DEBUG
int sender;
long stamp;
#endif /* SMTC_IPI_DEBUG */
};
/*
* Defined IPI Types
*/
#define LINUX_SMP_IPI 1
#define SMTC_CLOCK_TICK 2
#define IRQ_AFFINITY_IPI 3
/*
* A queue of IPI messages
*/
struct smtc_ipi_q {
struct smtc_ipi *head;
spinlock_t lock;
struct smtc_ipi *tail;
int depth;
int resched_flag; /* reschedule already queued */
};
static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->head == NULL)
q->head = q->tail = p;
else
q->tail->flink = p;
p->flink = NULL;
q->tail = p;
q->depth++;
#ifdef SMTC_IPI_DEBUG
p->sender = read_c0_tcbind();
p->stamp = read_c0_count();
#endif /* SMTC_IPI_DEBUG */
spin_unlock_irqrestore(&q->lock, flags);
}
static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
{
struct smtc_ipi *p;
if (q->head == NULL)
p = NULL;
else {
p = q->head;
q->head = q->head->flink;
q->depth--;
/* Arguably unnecessary, but leaves queue cleaner */
if (q->head == NULL)
q->tail = NULL;
}
return p;
}
static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
{
unsigned long flags;
struct smtc_ipi *p;
spin_lock_irqsave(&q->lock, flags);
p = __smtc_ipi_dq(q);
spin_unlock_irqrestore(&q->lock, flags);
return p;
}
static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->head == NULL) {
q->head = q->tail = p;
p->flink = NULL;
} else {
p->flink = q->head;
q->head = p;
}
q->depth++;
spin_unlock_irqrestore(&q->lock, flags);
}
static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&q->lock, flags);
retval = q->depth;
spin_unlock_irqrestore(&q->lock, flags);
return retval;
}
extern void smtc_send_ipi(int cpu, int type, unsigned int action);
#endif /* __ASM_SMTC_IPI_H */
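
The queue primitives composed in the obvious way; a hypothetical caller
(names below are invented for illustration, the real users lived in the
deleted SMTC core) might have looked like:

/* Hypothetical usage of the removed queue primitives (illustration
 * only; not taken from the deleted SMTC code). */
static struct smtc_ipi_q ipi_q = {
	.lock = __SPIN_LOCK_UNLOCKED(ipi_q.lock),
};

static void queue_tick_for(int cpu)
{
	static struct smtc_ipi tick_msg;

	tick_msg.type = SMTC_CLOCK_TICK;	/* one of the defined types */
	tick_msg.dest = cpu;
	smtc_ipi_nq(&ipi_q, &tick_msg);		/* FIFO enqueue at tail */
}

static void drain(void)
{
	struct smtc_ipi *p;

	while ((p = smtc_ipi_dq(&ipi_q)) != NULL)
		;	/* dispatch on p->type / p->arg here */
}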


@@ -1,23 +0,0 @@
/*
* Definitions for SMTC /proc entries
* Copyright(C) 2005 MIPS Technologies Inc.
*/
#ifndef __ASM_SMTC_PROC_H
#define __ASM_SMTC_PROC_H
/*
* per-"CPU" statistics
*/
struct smtc_cpu_proc {
unsigned long timerints;
unsigned long selfipis;
};
extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
/* Count of number of recoveries of "stolen" FPU access rights on 34K */
extern atomic_t smtc_fpu_recoveries;
#endif /* __ASM_SMTC_PROC_H */


@@ -19,22 +19,12 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
/*
* For SMTC kernel, global IE should be left set, and interrupts
* controlled exclusively via IXMT.
*/
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
.macro SAVE_AT
.set push
.set noat
@@ -186,16 +176,6 @@
mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp)
LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Ideally, these instructions would be shuffled in
* to cover the pipeline delay.
*/
.set mips32
mfc0 k0, CP0_TCSTATUS
.set mips0
LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
mfc0 v1, CP0_CAUSE
LONG_S $5, PT_R5(sp)
@@ -321,36 +301,6 @@
.set push
.set reorder
.set noat
#ifdef CONFIG_MIPS_MT_SMTC
.set mips32r2
/*
* We need to make sure the read-modify-write
* of Status below isn't perturbed by an interrupt
* or cross-TC access, so we need to do at least a DMT,
* protected by an interrupt-inhibit. But setting IXMT
* also creates a few-cycle window where an IPI could
* be queued and not be detected before potentially
* returning to a WAIT or user-mode loop. It must be
* replayed.
*
* We're in the middle of a context switch, and
* we can't dispatch it directly without trashing
* some registers, so we'll try to detect this unlikely
* case and program a software interrupt in the VPE,
* as would be done for a cross-VPE IPI. To accommodate
* the handling of that case, we're doing a DVPE instead
* of just a DMT here to protect against other threads.
* This is a lot of cruft to cover a tiny window.
* If you can find a better design, implement it!
*
*/
mfc0 v0, CP0_TCSTATUS
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
DVPE 5 # dvpe a1
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 a0, CP0_STATUS
ori a0, STATMASK
xori a0, STATMASK
@@ -362,59 +312,6 @@
and v0, v1
or v0, a0
mtc0 v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Only after EXL/ERL have been restored to status can we
* restore TCStatus.IXMT.
*/
LONG_L v1, PT_TCSTATUS(sp)
_ehb
mfc0 a0, CP0_TCSTATUS
andi v1, TCSTATUS_IXMT
bnez v1, 0f
/*
* We'd like to detect any IPIs queued in the tiny window
* above and request an software interrupt to service them
* when we ERET.
*
* Computing the offset into the IPIQ array of the executing
* TC's IPI queue in-line would be tedious. We use part of
* the TCContext register to hold 16 bits of offset that we
* can add in-line to find the queue head.
*/
mfc0 v0, CP0_TCCONTEXT
la a2, IPIQ
srl v0, v0, 16
addu a2, a2, v0
LONG_L v0, 0(a2)
beqz v0, 0f
/*
* If we have a queue, provoke dispatch within the VPE by setting C_SW1
*/
mfc0 v0, CP0_CAUSE
ori v0, v0, C_SW1
mtc0 v0, CP0_CAUSE
0:
/*
* This test should really never branch but
* let's be prudent here. Having atomized
* the shared register modifications, we can
* now EVPE, and must do so before interrupts
* are potentially re-enabled.
*/
andi a1, a1, MVPCONTROL_EVP
beqz a1, 1f
evpe
1:
/* We know that TCStatua.IXMT should be set from above */
xori a0, a0, TCSTATUS_IXMT
or a0, a0, v1
mtc0 a0, CP0_TCSTATUS
_ehb
.set mips0
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v1, PT_EPC(sp)
MTC0 v1, CP0_EPC
LONG_L $31, PT_R31(sp)
@@ -467,33 +364,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK
or t0, t1
xori t0, STATMASK
mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
* For SMTC, we need to set privilege
* and disable interrupts only for the
* current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
ori t0, ST0_EXL | ST0_ERL
xori t0, ST0_EXL | ST0_ERL
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard
.endm
@@ -502,35 +377,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro STI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK
or t0, t1
xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
* For SMTC, we need to set privilege
* and enable interrupts only for the
* current TC, using the TCStatus register.
*/
_ehb
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU *and* IXMT */
xori t0, 0x00001c00
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
ori t0, ST0_EXL
xori t0, ST0_EXL
mtc0 t0, CP0_STATUS
/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
irq_enable_hazard
.endm
@@ -540,32 +391,6 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro KMODE
#ifdef CONFIG_MIPS_MT_SMTC
/*
* This gets baroque in SMTC. We want to
* protect the non-atomic clearing of EXL
* with DMT/EMT, but we don't want to take
* an interrupt while DMT is still in effect.
*/
/* KMODE gets invoked from both reorder and noreorder code */
.set push
.set mips32r2
.set noreorder
mfc0 v0, CP0_TCSTATUS
andi v1, v0, TCSTATUS_IXMT
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
DMT 2 # dmt v0
/*
* We don't know a priori if ra is "live"
*/
move t0, ra
jal mips_ihb
nop /* delay slot */
move ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -576,25 +401,6 @@
or t0, t1
xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
_ehb
andi v0, v0, VPECONTROL_TE
beqz v0, 2f
nop /* delay slot */
emt
2:
mfc0 v0, CP0_TCSTATUS
/* Clear IXMT, then OR in previous value */
ori v0, TCSTATUS_IXMT
xori v0, TCSTATUS_IXMT
or v0, v1, v0
mtc0 v0, CP0_TCSTATUS
/*
* irq_disable_hazard below should expand to EHB
* on 24K/34K CPUS
*/
.set pop
#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard
.endm

Some files were not shown because too many files have changed in this diff.