Merge branch 'generic-ipi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'generic-ipi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (22 commits)
  generic-ipi: more merge fallout
  generic-ipi: merge fix
  x86, visws: use mach-default/entry_arch.h
  x86, visws: fix generic-ipi build
  generic-ipi: fixlet
  generic-ipi: fix s390 build bug
  generic-ipi: fix linux-next tree build failure
  fix: "smp_call_function: get rid of the unused nonatomic/retry argument"
  fix: "smp_call_function: get rid of the unused nonatomic/retry argument"
  fix "smp_call_function: get rid of the unused nonatomic/retry argument"
  on_each_cpu(): kill unused 'retry' parameter
  smp_call_function: get rid of the unused nonatomic/retry argument
  sh: convert to generic helpers for IPI function calls
  parisc: convert to generic helpers for IPI function calls
  mips: convert to generic helpers for IPI function calls
  m32r: convert to generic helpers for IPI function calls
  arm: convert to generic helpers for IPI function calls
  alpha: convert to generic helpers for IPI function calls
  ia64: convert to generic helpers for IPI function calls
  powerpc: convert to generic helpers for IPI function calls
  ...

Fix trivial conflicts due to rcu updates in kernel/rcupdate.c manually
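The common thread of this merge: smp_call_function(), smp_call_function_single() and on_each_cpu() lose their dead nonatomic/retry parameter, and the hand-rolled per-architecture cross-call machinery is replaced by the generic helpers in kernel/smp.c. As a rough before/after sketch of the calling convention (summarized from the diff below, not itself part of the patch):

    /* Before: every caller passed an unused retry/nonatomic flag. */
    int smp_call_function(void (*func)(void *), void *info, int nonatomic, int wait);
    int on_each_cpu(void (*func)(void *), void *info, int retry, int wait);

    /* After: the flag is gone ... */
    int smp_call_function(void (*func)(void *), void *info, int wait);
    int on_each_cpu(void (*func)(void *), void *info, int wait);

    /* ... and a converted architecture only supplies the IPI transport: */
    void arch_send_call_function_ipi(cpumask_t mask);
    void arch_send_call_function_single_ipi(int cpu);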
@@ -39,3 +39,6 @@ config HAVE_KRETPROBES
 
 config HAVE_DMA_ATTRS
 	def_bool n
+
+config USE_GENERIC_SMP_HELPERS
+	def_bool n
@@ -528,6 +528,7 @@ config ARCH_MAY_HAVE_PC_FDC
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 
 #ifdef CONFIG_SMP
 	if (smp_processor_id() != boot_cpuid)
-		smp_call_function_on_cpu(__marvel_access_rtc,
-					 &rtc_access, 1, 1,
-					 cpumask_of_cpu(boot_cpuid));
+		smp_call_function_single(boot_cpuid,
+					 __marvel_access_rtc,
+					 &rtc_access, 1);
 	else
 		__marvel_access_rtc(&rtc_access);
 #else
@@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd)
 	struct halt_info args;
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
-	on_each_cpu(common_shutdown_1, &args, 1, 0);
+	on_each_cpu(common_shutdown_1, &args, 0);
 }
 
 void
+14 -164
@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
@@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
 		wripir(i);
 }
 
-/* Structure and data for smp_call_function.  This is designed to
-   minimize static memory requirements.  Plus it looks cleaner.  */
-
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer.  The pointer is free if
-   it is initially locked.  If retry, spin until free.  */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
-	void *old, *tmp;
-
-	mb();
- again:
-	/* Compare and swap with zero.  */
-	asm volatile (
-	"1:	ldq_l	%0,%1\n"
-	"	mov	%3,%2\n"
-	"	bne	%0,2f\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,1b\n"
-	"2:"
-	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
-	: "r"(data)
-	: "memory");
-
-	if (old == 0)
-		return 0;
-	if (! retry)
-		return -EBUSY;
-
-	while (*(void **)lock)
-		barrier();
-	goto again;
-}
-
 void
 handle_ipi(struct pt_regs *regs)
 {
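The pointer_lock() deleted above is a hand-rolled compare-and-swap loop in Alpha ldq_l/stq_c (load-locked/store-conditional) assembly. For readers unfamiliar with that idiom, a hypothetical portable rewrite using the kernel's generic cmpxchg() would look like the sketch below; it is shown only to explain what the asm does, since the merge removes the lock entirely rather than rewriting it:

    /* Illustrative only: portable equivalent of the removed Alpha asm. */
    static int pointer_lock(void **lock, void *data, int retry)
    {
    	for (;;) {
    		/* Install 'data' iff *lock is currently NULL. */
    		if (cmpxchg(lock, NULL, data) == NULL)
    			return 0;
    		if (!retry)
    			return -EBUSY;
    		while (*lock)		/* spin until the holder releases */
    			barrier();
    	}
    }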
@@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
 			break;
 
 		case IPI_CALL_FUNC:
-		    {
-			struct smp_call_struct *data;
-			void (*func)(void *info);
-			void *info;
-			int wait;
-
-			data = smp_call_function_data;
-			func = data->func;
-			info = data->info;
-			wait = data->wait;
-
-			/* Notify the sending CPU that the data has been
-			   received, and execution is about to begin.  */
-			mb();
-			atomic_dec (&data->unstarted_count);
-
-			/* At this point the structure may be gone unless
-			   wait is true.  */
-			(*func)(info);
-
-			/* Notify the sending CPU that the task is done.  */
-			mb();
-			if (wait) atomic_dec (&data->unfinished_count);
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
 			break;
-		    }
 
 		case IPI_CPU_STOP:
 			halt();
@@ -700,102 +637,15 @@ smp_send_stop(void)
 	send_ipi_message(to_whom, IPI_CPU_STOP);
 }
 
-/*
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
-			  int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int num_cpus_to_call;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), to_whom);
-	num_cpus_to_call = cpus_weight(to_whom);
-
-	atomic_set(&data.unstarted_count, num_cpus_to_call);
-	atomic_set(&data.unfinished_count, num_cpus_to_call);
-
-	/* Acquire the smp_call_function_data mutex.  */
-	if (pointer_lock(&smp_call_function_data, &data, retry))
-		return -EBUSY;
-
-	/* Send a message to the requested CPUs.  */
-	send_ipi_message(to_whom, IPI_CALL_FUNC);
-
-	/* Wait for a minimal response.  */
-	timeout = jiffies + HZ;
-	while (atomic_read (&data.unstarted_count) > 0
-	       && time_before (jiffies, timeout))
-		barrier();
-
-	/* If there's no response yet, log a message but allow a longer
-	 * timeout period -- if we get a response this time, log
-	 * a message saying when we got it..
-	 */
-	if (atomic_read(&data.unstarted_count) > 0) {
-		long start_time = jiffies;
-		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-		       __func__);
-		timeout = jiffies + 30 * HZ;
-		while (atomic_read(&data.unstarted_count) > 0
-		       && time_before(jiffies, timeout))
-			barrier();
-		if (atomic_read(&data.unstarted_count) <= 0) {
-			long delta = jiffies - start_time;
-			printk(KERN_ERR
-			       "%s: response %ld.%ld seconds into long wait\n",
-			       __func__, delta / HZ,
-			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
-		}
-	}
-
-	/* We either got one or timed out -- clear the lock. */
-	mb();
-	smp_call_function_data = NULL;
-
-	/*
-	 * If after both the initial and long timeout periods we still don't
-	 * have a response, something is very wrong...
-	 */
-	BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
-	/* Wait for a complete response, if needed.  */
-	if (wait) {
-		while (atomic_read (&data.unfinished_count) > 0)
-			barrier();
-	}
-
-	return 0;
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_on_cpu (func, info, retry, wait,
-					 cpu_online_map);
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
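With the conversion above, Alpha's entire contribution shrinks to the two one-line IPI transports; queueing, waiting and completion accounting move to generic code. An illustrative caller of the post-merge API (a hypothetical example of the era, not taken from this diff) looks like this:

    #include <linux/smp.h>
    #include <asm/atomic.h>

    static atomic_t hits;

    /* Runs on each CPU, in interrupt context: must be fast, non-blocking. */
    static void bump_counter(void *info)
    {
    	atomic_inc((atomic_t *)info);
    }

    static void count_all_cpus(void)
    {
    	/* wait == 1: return only after every CPU has run bump_counter. */
    	on_each_cpu(bump_counter, &hits, 1);
    }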
@@ -807,7 +657,7 @@ void
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (on_each_cpu(ipi_imb, NULL, 1, 1))
+	if (on_each_cpu(ipi_imb, NULL, 1))
 		printk(KERN_CRIT "smp_imb: timed out\n");
 }
 EXPORT_SYMBOL(smp_imb);
@@ -823,7 +673,7 @@ flush_tlb_all(void)
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors.  */
-	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
 	}
 }
@@ -860,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		}
 	}
 
-	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
 
@@ -913,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	data.mm = mm;
 	data.addr = addr;
 
-	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
 
@@ -965,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 		}
 	}
 
-	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
 
@@ -65,7 +65,7 @@ op_axp_setup(void)
 	model->reg_setup(&reg, ctr, &sys);
 
 	/* Configure the registers on all cpus.  */
-	(void)smp_call_function(model->cpu_setup, &reg, 0, 1);
+	(void)smp_call_function(model->cpu_setup, &reg, 1);
 	model->cpu_setup(&reg);
 	return 0;
 }
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-	(void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
 	op_axp_cpu_start(NULL);
 	return 0;
 }
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-	(void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
 	op_axp_cpu_stop(NULL);
 }
@@ -701,6 +701,7 @@ source "kernel/time/Kconfig"
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP)
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
+18 -145
@@ -68,20 +68,10 @@ enum ipi_msg_type {
 	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t pending;
-	cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
 	local_irq_restore(flags);
 }
 
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
-				    int retry, int wait, cpumask_t callmap)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int ret = 0;
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		goto out;
-
-	data.pending = callmap;
-	if (wait)
-		data.unfinished = callmap;
-
-	/*
-	 * try to get the mutex on smp_call_function_data
-	 */
-	spin_lock(&smp_call_function_lock);
-	smp_call_function_data = &data;
-
-	send_ipi_message(callmap, IPI_CALL_FUNC);
-
-	timeout = jiffies + HZ;
-	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-		barrier();
-
-	/*
-	 * did we time out?
-	 */
-	if (!cpus_empty(data.pending)) {
-		/*
-		 * this may be causing our panic - report it
-		 */
-		printk(KERN_CRIT
-		       "CPU%u: smp_call_function timeout for %p(%p)\n"
-		       "      callmap %lx pending %lx, %swait\n",
-		       smp_processor_id(), func, info, *cpus_addr(callmap),
-		       *cpus_addr(data.pending), wait ? "" : "no ");
-
-		/*
-		 * TRACE
-		 */
-		timeout = jiffies + (5 * HZ);
-		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-			barrier();
-
-		if (cpus_empty(data.pending))
-			printk(KERN_CRIT "     RESOLVED\n");
-		else
-			printk(KERN_CRIT "     STILL STUCK\n");
-	}
-
-	/*
-	 * whatever happened, we're done with the data, so release it
-	 */
-	smp_call_function_data = NULL;
-	spin_unlock(&smp_call_function_lock);
-
-	if (!cpus_empty(data.pending)) {
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	if (wait)
-		while (!cpus_empty(data.unfinished))
-			barrier();
- out:
-
-	return 0;
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int retry,
-		      int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_on_cpu(func, info, retry, wait,
-					cpu_online_map);
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
-			     int retry, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int current_cpu = get_cpu();
-	int ret = 0;
-
-	if (cpu == current_cpu) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	} else
-		ret = smp_call_function_on_cpu(func, info, retry, wait,
-					       cpumask_of_cpu(cpu));
-
-	put_cpu();
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(smp_call_function_single);
-
 void show_ipi_list(struct seq_file *p)
 {
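The deleted ARM smp_call_function_single() above also shows why the conversion is behavior-preserving: the only arch-specific logic was the self-call fast path (run func locally with interrupts disabled) and the cross-call path, both of which the generic version reproduces. Call sites change only by dropping the retry argument, e.g. (hypothetical function and argument names, for illustration):

    /* old: (cpu, func, info, retry, wait) */
    smp_call_function_single(cpu, do_flush, &args, 0, 1);
    /* new: (cpu, func, info, wait) */
    smp_call_function_single(cpu, do_flush, &args, 1);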
@@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
-{
-	struct smp_call_struct *data = smp_call_function_data;
-	void (*func)(void *info) = data->func;
-	void *info = data->info;
-	int wait = data->wait;
-
-	cpu_clear(cpu, data->pending);
-
-	func(info);
-
-	if (wait)
-		cpu_clear(cpu, data->unfinished);
-}
-
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 				break;
 
 			case IPI_CALL_FUNC:
-				ipi_call_function(cpu);
+				generic_smp_call_function_interrupt();
+				break;
+
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
 				break;
 
 			case IPI_CPU_STOP:
@@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
-		 cpumask_t mask)
+on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
 {
 	int ret = 0;
 
 	preempt_disable();
 
-	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+	ret = smp_call_function_mask(mask, func, info, wait);
 	if (cpu_isset(smp_processor_id(), mask))
 		func(info);
 
@@ -731,14 +604,14 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	cpumask_t mask = mm->cpu_vm_mask;
 
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -758,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 
 	ta.ta_start = kaddr;
 
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -781,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 }
@@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void))
 	data.ret = 0;
 
 	preempt_disable();
-	smp_call_function(em_func, &data, 1, 1);
+	smp_call_function(em_func, &data, 1);
 	em_func(&data);
 	preempt_enable();
 
@@ -352,7 +352,7 @@ static int __init vfp_init(void)
 	else if (vfpsid & FPSID_NODOUBLE) {
 		printk("no double precision support\n");
 	} else {
-		smp_call_function(vfp_enable, NULL, 1, 1);
+		smp_call_function(vfp_enable, NULL, 1);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
 		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
@@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy)
 /* Other calls */
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	cpumask_t cpu_mask = CPU_MASK_ALL;
 	struct call_data_struct data;
@@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					 NULL, 1, 0);
+					 NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
+15 -239
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true.  */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done.  */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
  */
@@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function_single);
 
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- *  <mask>	The set of cpus to run on.  Must not include the current cpu.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_mask);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- * [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
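Every architecture in this merge deletes a near-identical copy of the same sender-side handshake, which is exactly the duplication kernel/smp.c now owns once. Stripped to its skeleton, the removed logic looked like this (paraphrased from the ia64 smp_call_function() deleted above, renamed legacy_* to mark it as a sketch rather than live code):

    /* Skeleton of the per-arch handshake this merge removes. */
    static int legacy_smp_call_function(void (*func)(void *), void *info, int wait)
    {
    	struct call_data_struct data = {
    		.func = func, .info = info, .wait = wait,
    	};
    	int cpus;

    	spin_lock(&call_lock);
    	cpus = num_online_cpus() - 1;
    	if (!cpus) {
    		spin_unlock(&call_lock);
    		return 0;
    	}
    	call_data = &data;
    	mb();			/* publish call_data before raising the IPI */
    	send_IPI_allbutself(IPI_CALL_FUNC);

    	while (atomic_read(&data.started) != cpus)
    		cpu_relax();	/* every target has copied func/info */
    	if (wait)
    		while (atomic_read(&data.finished) != cpus)
    			cpu_relax();	/* every target has run func */
    	call_data = NULL;
    	spin_unlock(&call_lock);
    	return 0;
    }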
@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
@@ -395,14 +395,14 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-	lock_ipi_calllock();
+	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	unlock_ipi_calllock();
+	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
 
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 	if (use_ipi) {
 		/* use an interprocessor interrupt to call SAL */
 		smp_call_function_single(cpu, sn_hwperf_call_sal,
-					 op_info, 1, 1);
+					 op_info, 1);
 	}
 	else {
 		/* migrate the task before calling SAL */
Some files were not shown because too many files have changed in this diff.