LoongArch: Add multi-processor (SMP) support
LoongArch-based processors have 4, 8 or 16 cores per package. This patch adds multi-processor (SMP) support for LoongArch.

Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
@@ -64,6 +64,7 @@ config LOONGARCH
 	select GENERIC_LIB_UCMPDI2
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select GPIOLIB
 	select HAVE_ARCH_AUDITSYSCALL
@@ -92,7 +93,7 @@ config LOONGARCH
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_TIF_NOHZ
-	select HAVE_VIRT_CPU_ACCOUNTING_GEN
+	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
 	select IRQ_FORCED_THREADING
 	select IRQ_LOONGARCH_CPU
 	select MODULES_USE_ELF_RELA if MODULES
@@ -297,6 +298,43 @@ config EFI
 	  This enables the kernel to use EFI runtime services that are
 	  available (such as the EFI variable services).
 
+config SMP
+	bool "Multi-Processing support"
+	help
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, say N. If you have a system with more
+	  than one CPU, say Y.
+
+	  If you say N here, the kernel will run on uni- and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  uniprocessor machines. On a uniprocessor machine, the kernel
+	  will run faster if you say N here.
+
+	  See also the SMP-HOWTO available at <http://www.tldp.org/docs.html#howto>.
+
+	  If you don't know what to do here, say N.
+
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	select GENERIC_IRQ_MIGRATION
+	help
+	  Say Y here to allow turning CPUs off and on. CPUs can be
+	  controlled through /sys/devices/system/cpu.
+	  (Note: power management support will enable this option
+	  automatically on SMP systems.)
+	  Say N if you want to disable CPU hotplug.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-256)"
+	range 2 256
+	depends on SMP
+	default "64"
+	help
+	  This allows you to specify the maximum number of CPUs which this
+	  kernel will support.
+
 config FORCE_MAX_ZONEORDER
 	int "Maximum zone order"
 	range 14 64 if PAGE_SIZE_64KB
@@ -162,6 +162,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 		"	sc.w	%1, %2		\n"
 		"	beq	$zero, %1, 1b	\n"
 		"2:				\n"
+		__WEAK_LLSC_MB
 		: "=&r" (result), "=&r" (temp),
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "I" (-i));
@@ -174,6 +175,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 		"	sc.w	%1, %2		\n"
 		"	beq	$zero, %1, 1b	\n"
 		"2:				\n"
+		__WEAK_LLSC_MB
 		: "=&r" (result), "=&r" (temp),
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "r" (i));
@@ -323,6 +325,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
 		"	sc.d	%1, %2		\n"
 		"	beq	%1, $zero, 1b	\n"
 		"2:				\n"
+		__WEAK_LLSC_MB
 		: "=&r" (result), "=&r" (temp),
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "I" (-i));
@@ -335,6 +338,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
 		"	sc.d	%1, %2		\n"
 		"	beq	%1, $zero, 1b	\n"
 		"2:				\n"
+		__WEAK_LLSC_MB
 		: "=&r" (result), "=&r" (temp),
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "r" (i));
@@ -18,6 +18,19 @@
 #define mb()		fast_mb()
 #define iob()		fast_iob()
 
+#define __smp_mb()	__asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_rmb()	__asm__ __volatile__("dbar 0" : : : "memory")
+#define __smp_wmb()	__asm__ __volatile__("dbar 0" : : : "memory")
+
+#ifdef CONFIG_SMP
+#define __WEAK_LLSC_MB		"	dbar 0	\n"
+#else
+#define __WEAK_LLSC_MB		"		\n"
+#endif
+
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
  * @index: array element index
@@ -46,6 +59,101 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 	return mask;
 }
 
+#define __smp_load_acquire(p)						\
+({									\
+	union { typeof(*p) __val; char __c[1]; } __u;			\
+	unsigned long __tmp = 0;					\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 1:								\
+		*(__u8 *)__u.__c = *(volatile __u8 *)p;			\
+		__smp_mb();						\
+		break;							\
+	case 2:								\
+		*(__u16 *)__u.__c = *(volatile __u16 *)p;		\
+		__smp_mb();						\
+		break;							\
+	case 4:								\
+		__asm__ __volatile__(					\
+		"amor_db.w %[val], %[tmp], %[mem]	\n"		\
+		: [val] "=&r" (*(__u32 *)__u.__c)			\
+		: [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp)		\
+		: "memory");						\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__(					\
+		"amor_db.d %[val], %[tmp], %[mem]	\n"		\
+		: [val] "=&r" (*(__u64 *)__u.__c)			\
+		: [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp)		\
+		: "memory");						\
+		break;							\
+	}								\
+	(typeof(*p))__u.__val;						\
+})
+
+#define __smp_store_release(p, v)					\
+do {									\
+	union { typeof(*p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(*p)) (v) };			\
+	unsigned long __tmp;						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 1:								\
+		__smp_mb();						\
+		*(volatile __u8 *)p = *(__u8 *)__u.__c;			\
+		break;							\
+	case 2:								\
+		__smp_mb();						\
+		*(volatile __u16 *)p = *(__u16 *)__u.__c;		\
+		break;							\
+	case 4:								\
+		__asm__ __volatile__(					\
+		"amswap_db.w %[tmp], %[val], %[mem]	\n"		\
+		: [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp)		\
+		: [val] "r" (*(__u32 *)__u.__c)				\
+		: );							\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__(					\
+		"amswap_db.d %[tmp], %[val], %[mem]	\n"		\
+		: [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp)		\
+		: [val] "r" (*(__u64 *)__u.__c)				\
+		: );							\
+		break;							\
+	}								\
+} while (0)
+
+#define __smp_store_mb(p, v)						\
+do {									\
+	union { typeof(p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(p)) (v) };			\
+	unsigned long __tmp;						\
+	switch (sizeof(p)) {						\
+	case 1:								\
+		*(volatile __u8 *)&p = *(__u8 *)__u.__c;		\
+		__smp_mb();						\
+		break;							\
+	case 2:								\
+		*(volatile __u16 *)&p = *(__u16 *)__u.__c;		\
+		__smp_mb();						\
+		break;							\
+	case 4:								\
+		__asm__ __volatile__(					\
+		"amswap_db.w %[tmp], %[val], %[mem]	\n"		\
+		: [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp)		\
+		: [val] "r" (*(__u32 *)__u.__c)				\
+		: );							\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__(					\
+		"amswap_db.d %[tmp], %[val], %[mem]	\n"		\
+		: [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp)		\
+		: [val] "r" (*(__u64 *)__u.__c)				\
+		: );							\
+		break;							\
+	}								\
+} while (0)
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
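The amor_db/amswap_db forms above give LoongArch single-instruction load-acquire and store-release for 32- and 64-bit accesses, while the sub-word cases fall back to a plain access bracketed by a full "dbar 0". A minimal sketch of the message-passing pattern these primitives order (illustration only, not part of the patch; the generic smp_load_acquire()/smp_store_release() wrappers expand to the __smp_* macros defined here):

/* Hypothetical producer/consumer pairing, assuming these two variables. */
static int payload;
static int ready;

void producer(void)
{
	payload = 42;				/* plain store */
	smp_store_release(&ready, 1);		/* amswap_db.w: store with release ordering */
}

int consumer(void)
{
	if (smp_load_acquire(&ready))		/* amor_db.w: load with acquire ordering */
		return payload;			/* ordered after the flag load: sees 42 */
	return -1;
}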
@@ -59,6 +59,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 	"	" st "	$t0, %1			\n"	\
 	"	beq	$zero, $t0, 1b		\n"	\
 	"2:					\n"	\
+	__WEAK_LLSC_MB					\
 	: "=&r" (__ret), "=ZB"(*m)			\
 	: "ZB"(*m), "Jr" (old), "Jr" (new)		\
 	: "t0", "memory");				\
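__WEAK_LLSC_MB lands on the exit path of the LL/SC loop, so a compare-and-exchange that leaves the loop still executes a barrier on weakly ordered cores. A usage sketch (hypothetical example code, not from the patch):

/* Hypothetical caller: claim a resource exactly once across CPUs. */
static atomic_t owner = ATOMIC_INIT(-1);

bool try_claim(int cpu)
{
	/* atomic_cmpxchg() expands to the LL/SC sequence patched above;
	 * ordering is provided whether the exchange wins or loses. */
	return atomic_cmpxchg(&owner, -1, cpu) == -1;
}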
@@ -86,6 +86,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
 	"2:	sc.w	$t0, %2			\n"
 	"	beq	$zero, $t0, 1b		\n"
 	"3:					\n"
+	__WEAK_LLSC_MB
 	"	.section .fixup,\"ax\"		\n"
 	"4:	li.d	%0, %6			\n"
 	"	b	3b			\n"
@@ -21,4 +21,6 @@ typedef struct {
 
 DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
 
+#define __ARCH_IRQ_STAT
+
 #endif /* _ASM_HARDIRQ_H */
@@ -125,6 +125,8 @@ extern struct irq_domain *pch_lpc_domain;
 extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
 extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
 
+extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+
 #include <asm-generic/irq.h>
 
 #endif /* _ASM_IRQ_H */
@@ -5,6 +5,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/cmpxchg.h>
+
 /* Use r21 for fast access */
 register unsigned long __my_cpu_offset __asm__("$r21");
 
@@ -15,6 +17,198 @@ static inline void set_my_cpu_offset(unsigned long off)
 }
 #define __my_cpu_offset __my_cpu_offset
 
+#define PERCPU_OP(op, asm_op, c_op)					\
+static inline unsigned long __percpu_##op(void *ptr,			\
+			unsigned long val, int size)			\
+{									\
+	unsigned long ret;						\
+									\
+	switch (size) {							\
+	case 4:								\
+		__asm__ __volatile__(					\
+		"am"#asm_op".w"	" %[ret], %[val], %[ptr]	\n"	\
+		: [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr)		\
+		: [val] "r" (val));					\
+		break;							\
+	case 8:								\
+		__asm__ __volatile__(					\
+		"am"#asm_op".d" " %[ret], %[val], %[ptr]	\n"	\
+		: [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr)		\
+		: [val] "r" (val));					\
+		break;							\
+	default:							\
+		ret = 0;						\
+		BUILD_BUG();						\
+	}								\
+									\
+	return ret c_op val;						\
+}
+
+PERCPU_OP(add, add, +)
+PERCPU_OP(and, and, &)
+PERCPU_OP(or, or, |)
+#undef PERCPU_OP
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+	unsigned long ret;
+
+	switch (size) {
+	case 1:
+		__asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr]	\n"
+		: [ret] "=&r"(ret)
+		: [ptr] "r"(ptr)
+		: "memory");
+		break;
+	case 2:
+		__asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr]	\n"
+		: [ret] "=&r"(ret)
+		: [ptr] "r"(ptr)
+		: "memory");
+		break;
+	case 4:
+		__asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr]	\n"
+		: [ret] "=&r"(ret)
+		: [ptr] "r"(ptr)
+		: "memory");
+		break;
+	case 8:
+		__asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr]	\n"
+		: [ret] "=&r"(ret)
+		: [ptr] "r"(ptr)
+		: "memory");
+		break;
+	default:
+		ret = 0;
+		BUILD_BUG();
+	}
+
+	return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("stx.b %[val], $r21, %[ptr]	\n"
+		:
+		: [val] "r" (val), [ptr] "r" (ptr)
+		: "memory");
+		break;
+	case 2:
+		__asm__ __volatile__("stx.h %[val], $r21, %[ptr]	\n"
+		:
+		: [val] "r" (val), [ptr] "r" (ptr)
+		: "memory");
+		break;
+	case 4:
+		__asm__ __volatile__("stx.w %[val], $r21, %[ptr]	\n"
+		:
+		: [val] "r" (val), [ptr] "r" (ptr)
+		: "memory");
+		break;
+	case 8:
+		__asm__ __volatile__("stx.d %[val], $r21, %[ptr]	\n"
+		:
+		: [val] "r" (val), [ptr] "r" (ptr)
+		: "memory");
+		break;
+	default:
+		BUILD_BUG();
+	}
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+						int size)
+{
+	switch (size) {
+	case 4:
+		return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
+
+	case 8:
+		return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
+
+	default:
+		BUILD_BUG();
+	}
+
+	return 0;
+}
+
+/* this_cpu_cmpxchg */
+#define _protect_cmpxchg_local(pcp, o, n)			\
+({								\
+	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
+	preempt_disable_notrace();				\
+	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
+	preempt_enable_notrace();				\
+	__ret;							\
+})
+
+#define _percpu_read(pcp)					\
+({								\
+	typeof(pcp) __retval;					\
+	__retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp));	\
+	__retval;						\
+})
+
+#define _percpu_write(pcp, val)					\
+do {								\
+	__percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp));	\
+} while (0)							\
+
+#define _pcp_protect(operation, pcp, val)			\
+({								\
+	typeof(pcp) __retval;					\
+	preempt_disable_notrace();				\
+	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
+					  (val), sizeof(pcp));	\
+	preempt_enable_notrace();				\
+	__retval;						\
+})
+
+#define _percpu_add(pcp, val) \
+	_pcp_protect(__percpu_add, pcp, val)
+
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
+
+#define _percpu_and(pcp, val) \
+	_pcp_protect(__percpu_and, pcp, val)
+
+#define _percpu_or(pcp, val) \
+	_pcp_protect(__percpu_or, pcp, val)
+
+#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
+	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
+
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
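With the per-CPU base held in $r21, the generic this_cpu_read()/this_cpu_add() operations become a single ldx/stx or AM* instruction with no address computation in C. A usage sketch (hypothetical example, not from the patch):

/* Hypothetical per-CPU counter built on the accessors above. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hit_count);

void note_hit(void)
{
	this_cpu_add(hit_count, 1);	/* this_cpu_add_8 -> __percpu_add -> amadd.d */
}

unsigned long sum_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(hit_count, cpu);	/* cross-CPU read, not $r21-relative */
	return sum;
}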
@@ -279,8 +279,29 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+		"1:"	__LL	"%[tmp], %[buddy]		\n"
+		"	bnez	%[tmp], 2f			\n"
+		"	 or	%[tmp], %[tmp], %[global]	\n"
+			__SC	"%[tmp], %[buddy]		\n"
+		"	beqz	%[tmp], 1b			\n"
+		"	nop					\n"
+		"2:						\n"
+		__WEAK_LLSC_MB
+		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+		: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 }
arch/loongarch/include/asm/smp.h (new file, 124 lines)
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+
+void loongson3_smp_setup(void);
+void loongson3_prepare_cpus(unsigned int max_cpus);
+void loongson3_boot_secondary(int cpu, struct task_struct *idle);
+void loongson3_init_secondary(void);
+void loongson3_smp_finish(void);
+void loongson3_send_ipi_single(int cpu, unsigned int action);
+void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+#ifdef CONFIG_HOTPLUG_CPU
+int loongson3_cpu_disable(void);
+void loongson3_cpu_die(unsigned int cpu);
+#endif
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+	loongson3_smp_setup();
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void plat_smp_setup(void) { }
+
+#endif /* !CONFIG_SMP */
+
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+	extern int vdso_smp_processor_id(void)
+		__compiletime_error("VDSO should not call smp_processor_id()");
+	return vdso_smp_processor_id();
+#else
+	return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
+
+/* Map from cpu id to sequential logical cpu number. This will only
+ * not be idempotent when cpus failed to come on-line. */
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu)  __cpu_number_map[cpu]
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]
+
+#define cpu_physical_id(cpu)	cpu_logical_map(cpu)
+
+#define SMP_BOOT_CPU		0x1
+#define SMP_RESCHEDULE		0x2
+#define SMP_CALL_FUNCTION	0x4
+
+struct secondary_data {
+	unsigned long stack;
+	unsigned long thread_info;
+};
+extern struct secondary_data cpuboot_data;
+
+extern asmlinkage void smpboot_entry(void);
+
+extern void calculate_cpu_foreign_map(void);
+
+/*
+ * Generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+static inline void smp_send_reschedule(int cpu)
+{
+	loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+	loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+	return loongson3_cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	loongson3_cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
+#endif /* __ASM_SMP_H */
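The SMP_* constants form a bitmask protocol: a sender ORs actions into the target CPU's IPI mailbox, and the receiving handler clears and decodes them. A sketch of the receiving side (ipi_read_clear() is a hypothetical helper standing in for the mailbox fetch-and-clear; the real handler lives in the kernel/smp.c added by this commit, which is not shown in this diff):

/* Hypothetical decoder for the IPI action bits defined above. */
static irqreturn_t example_ipi_interrupt(int irq, void *dev)
{
	/* assumed helper: fetch and clear this CPU's pending action mask */
	unsigned int action = ipi_read_clear(smp_processor_id());

	if (action & SMP_RESCHEDULE)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION)
		generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}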
@@ -77,17 +77,24 @@
  * new value in sp.
  */
 	.macro	get_saved_sp docfi=0
 	la.abs	t1, kernelsp
+#ifdef CONFIG_SMP
+	csrrd	t0, PERCPU_BASE_KS
+	LONG_ADD	t1, t1, t0
+#endif
 	move	t0, sp
 	.if \docfi
 	.cfi_register sp, t0
 	.endif
 	LONG_L	sp, t1, 0
 	.endm
 
 	.macro	set_saved_sp stackp temp temp2
 	la.abs	\temp, kernelsp
+#ifdef CONFIG_SMP
+	LONG_ADD	\temp, \temp, u0
+#endif
 	LONG_S	\stackp, \temp, 0
 	.endm
 
 	.macro	SAVE_SOME docfi=0
|
||||
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
|
||||
extern void local_flush_tlb_one(unsigned long vaddr);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
extern void flush_tlb_all(void);
|
||||
extern void flush_tlb_mm(struct mm_struct *);
|
||||
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
|
||||
extern void flush_tlb_kernel_range(unsigned long, unsigned long);
|
||||
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
|
||||
extern void flush_tlb_one(unsigned long vaddr);
|
||||
|
||||
#else /* CONFIG_SMP */
|
||||
|
||||
#define flush_tlb_all() local_flush_tlb_all()
|
||||
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
|
||||
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
|
||||
@@ -32,4 +43,6 @@ extern void local_flush_tlb_one(unsigned long vaddr);
|
||||
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
|
||||
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
#endif /* __ASM_TLBFLUSH_H */
|
||||
|
||||
@@ -7,7 +7,12 @@
 
 #include <linux/smp.h>
 
-#define cpu_logical_map(cpu)  0
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu)	(cpu_data[cpu].package)
+#define topology_core_id(cpu)			(cpu_data[cpu].core)
+#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu)		(&cpu_sibling_map[cpu])
+#endif
 
 #include <asm-generic/topology.h>
@@ -18,4 +18,6 @@ obj-$(CONFIG_MODULES)		+= module.o module-sections.o
 
 obj-$(CONFIG_PROC_FS)		+= proc.o
 
+obj-$(CONFIG_SMP)		+= smp.o
+
 CPPFLAGS_vmlinux.lds		:= $(KBUILD_CFLAGS)
@@ -137,8 +137,44 @@ void __init acpi_boot_table_init(void)
 	}
 }
 
+static int set_processor_mask(u32 id, u32 flags)
+{
+	int cpu, cpuid = id;
+
+	if (num_processors >= nr_cpu_ids) {
+		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
+			" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+
+		return -ENODEV;
+	}
+	if (cpuid == loongson_sysconf.boot_cpu_id)
+		cpu = 0;
+	else
+		cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+	if (flags & ACPI_MADT_ENABLED) {
+		num_processors++;
+		set_cpu_possible(cpu, true);
+		set_cpu_present(cpu, true);
+		__cpu_number_map[cpuid] = cpu;
+		__cpu_logical_map[cpu] = cpuid;
+	} else
+		disabled_cpus++;
+
+	return cpu;
+}
+
 static void __init acpi_process_madt(void)
 {
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		__cpu_number_map[i] = -1;
+		__cpu_logical_map[i] = -1;
+	}
+
 	loongson_sysconf.nr_cpus = num_processors;
 }
 
@@ -167,3 +203,36 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
 {
 	memblock_reserve(addr, size);
 }
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+
+#include <acpi/processor.h>
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
+{
+	int cpu;
+
+	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
+	if (cpu < 0) {
+		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+		return cpu;
+	}
+
+	*pcpu = cpu;
+
+	return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+	set_cpu_present(cpu, false);
+	num_processors--;
+
+	pr_info("cpu%d hot remove!\n", cpu);
+
+	return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
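set_processor_mask() keeps __cpu_number_map and __cpu_logical_map as inverse mappings between ACPI (physical) CPU ids and sequential logical CPU numbers. An illustrative walk of that mapping (hypothetical debug helper, not part of the patch):

/* Hypothetical helper: show the physical <-> logical mapping built above. */
static void dump_cpu_maps(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		pr_info("logical cpu%d <-> physical id %d\n",
			cpu, cpu_logical_map(cpu));
}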
@@ -252,3 +252,13 @@ void output_signal_defines(void)
 	DEFINE(_SIGXFSZ, SIGXFSZ);
 	BLANK();
 }
+
+#ifdef CONFIG_SMP
+void output_smpboot_defines(void)
+{
+	COMMENT("Linux smp cpu boot offsets.");
+	OFFSET(CPU_BOOT_STACK, secondary_data, stack);
+	OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
+	BLANK();
+}
+#endif
@@ -65,4 +65,34 @@ SYM_CODE_START(kernel_entry)		# kernel entry point
 
 SYM_CODE_END(kernel_entry)
 
+#ifdef CONFIG_SMP
+
+/*
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * function after setting up the stack and tp registers.
+ */
+SYM_CODE_START(smpboot_entry)
+	li.d	t0, CSR_DMW0_INIT	# UC, PLV0
+	csrwr	t0, LOONGARCH_CSR_DMWIN0
+	li.d	t0, CSR_DMW1_INIT	# CA, PLV0
+	csrwr	t0, LOONGARCH_CSR_DMWIN1
+	li.w	t0, 0xb0		# PLV=0, IE=0, PG=1
+	csrwr	t0, LOONGARCH_CSR_CRMD
+	li.w	t0, 0x04		# PLV=0, PIE=1, PWE=0
+	csrwr	t0, LOONGARCH_CSR_PRMD
+	li.w	t0, 0x00		# FPE=0, SXE=0, ASXE=0, BTE=0
+	csrwr	t0, LOONGARCH_CSR_EUEN
+
+	la.abs	t0, cpuboot_data
+	ld.d	sp, t0, CPU_BOOT_STACK
+	ld.d	tp, t0, CPU_BOOT_TINFO
+
+	la.abs	t0, 0f
+	jirl	zero, t0, 0
+0:
+	bl	start_secondary
+SYM_CODE_END(smpboot_entry)
+
+#endif /* CONFIG_SMP */
+
 SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
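smpboot_entry expects the boot CPU to have published the new core's stack and thread_info through cpuboot_data (the CPU_BOOT_STACK/CPU_BOOT_TINFO offsets generated by asm-offsets.c above) before the wakeup IPI arrives. A sketch of that boot-side handshake (assumed shape only; the actual loongson3_boot_secondary() lives in the smp.c not shown in this diff):

/* Hypothetical boot-side counterpart of smpboot_entry. */
void example_boot_secondary(int cpu, struct task_struct *idle)
{
	/* publish the idle task's stack top and thread_info ... */
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	/* ... then wake the target core; it enters at smpboot_entry */
	loongson3_send_ipi_single(cpu, SMP_BOOT_CPU);
}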
@@ -47,13 +47,17 @@ asmlinkage void spurious_interrupt(void)
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
+#ifdef CONFIG_SMP
+	show_ipi_list(p, prec);
+#endif
 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 	return 0;
 }
 
 void __init init_IRQ(void)
 {
-	int i;
+	int i, r, ipi_irq;
+	static int ipi_dummy_dev;
 	unsigned int order = get_order(IRQ_STACK_SIZE);
 	struct page *page;
 
@@ -61,6 +65,13 @@ void __init init_IRQ(void)
 	clear_csr_estat(ESTATF_IP);
 
 	irqchip_init();
+#ifdef CONFIG_SMP
+	ipi_irq = EXCCODE_IPI - EXCCODE_INT_START;
+	irq_set_percpu_devid(ipi_irq);
+	r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
+	if (r < 0)
+		panic("IPI IRQ request failed\n");
+#endif
 
 	for (i = 0; i < NR_IRQS; i++)
 		irq_set_noprobe(i);
@@ -35,6 +35,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	unsigned int fp_version = cpu_data[n].fpu_vers;
 	struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
 
+#ifdef CONFIG_SMP
+	if (!cpu_online(n))
+		return 0;
+#endif
+
 	/*
 	 * For the first processor also print the system type
 	 */
@@ -53,6 +53,13 @@
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}
+#endif
+
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
(Some files in this commit are not shown because too many files changed in this diff.)