Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
gameport: use this_cpu_read instead of lookup
x86: udelay: Use this_cpu_read to avoid address calculation
x86: Use this_cpu_inc_return for nmi counter
x86: Replace uses of current_cpu_data with this_cpu ops
x86: Use this_cpu_ops to optimize code
vmstat: User per cpu atomics to avoid interrupt disable / enable
irq_work: Use per cpu atomics instead of regular atomics
cpuops: Use cmpxchg for xchg to avoid lock semantics
x86: this_cpu_cmpxchg and this_cpu_xchg operations
percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
percpu,x86: relocate this_cpu_add_return() and friends
connector: Use this_cpu operations
xen: Use this_cpu_inc_return
taskstats: Use this_cpu_ops
random: Use this_cpu_inc_return
fs: Use this_cpu_inc_return in buffer.c
highmem: Use this_cpu_xx_return() operations
vmstat: Use this_cpu_inc_return for vm statistics
x86: Support for this_cpu_add, sub, dec, inc_return
percpu: Generic support for this_cpu_add, sub, dec, inc_return
...
Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c}
as per Tejun.
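The common thread in the series is replacing address-taking per-cpu accessors (__get_cpu_var(), and get_cpu_var()/put_cpu_var() pairs) with this_cpu operations, which fold the per-cpu address calculation into the access itself; on x86 they compile down to a single segment-prefixed instruction. A minimal sketch of the conversion pattern, using a hypothetical demo_count variable rather than any symbol touched by this merge:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);	/* hypothetical */

static void demo_old_style(void)
{
	/* Old: compute this CPU's address of demo_count, then modify
	 * through it. */
	__get_cpu_var(demo_count)++;
}

static void demo_new_style(void)
{
	/* New: a single this_cpu op; on x86 this becomes one
	 * %gs-prefixed increment with no separate address calculation. */
	__this_cpu_inc(demo_count);
}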
+1 -1
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
-		__get_cpu_var(process_counts)--;
+		__this_cpu_dec(process_counts);
 	}
 	list_del_rcu(&p->thread_group);
 }
+1 -1
@@ -1285,7 +1285,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail(&p->sibling, &p->real_parent->children);
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
-			__get_cpu_var(process_counts)++;
+			__this_cpu_inc(process_counts);
 		}
 		attach_pid(p, PIDTYPE_PID, pid);
 		nr_threads++;
+1 -1
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-	return __get_cpu_var(hrtimer_bases).hres_active;
+	return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
+9 -9
@@ -77,21 +77,21 @@ void __weak arch_irq_work_raise(void)
  */
 static void __irq_work_queue(struct irq_work *entry)
 {
-	struct irq_work **head, *next;
+	struct irq_work *next;
 
-	head = &get_cpu_var(irq_work_list);
+	preempt_disable();
 
 	do {
-		next = *head;
+		next = __this_cpu_read(irq_work_list);
 		/* Can assign non-atomic because we keep the flags set. */
 		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(head, next, entry) != next);
+	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
 
 	/* The list was empty, raise self-interrupt to start processing. */
 	if (!irq_work_next(entry))
 		arch_irq_work_raise();
 
-	put_cpu_var(irq_work_list);
+	preempt_enable();
 }
 
 /*
@@ -120,16 +120,16 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list, **head;
+	struct irq_work *list;
 
-	head = &__get_cpu_var(irq_work_list);
-	if (*head == NULL)
+	if (this_cpu_read(irq_work_list) == NULL)
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = xchg(head, NULL);
+	list = this_cpu_xchg(irq_work_list, NULL);
 
 	while (list != NULL) {
 		struct irq_work *entry = list;
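The irq_work hunks above also show the locking side of the series ("cpuops: Use cmpxchg for xchg to avoid lock semantics"): the global cmpxchg()/xchg() on the per-cpu list head become this_cpu_cmpxchg()/this_cpu_xchg(), which need no LOCK prefix because only code running on the owning CPU mutates irq_work_list, and the get_cpu_var()/put_cpu_var() pinning collapses to preempt_disable()/preempt_enable(). A sketch of the same lockless-push idiom, with hypothetical names (demo_stack and struct demo_node are illustrative, not symbols from this tree):

#include <linux/percpu.h>

struct demo_node {
	struct demo_node *next;
};

static DEFINE_PER_CPU(struct demo_node *, demo_stack);	/* hypothetical */

static void demo_push(struct demo_node *node)
{
	struct demo_node *head;

	preempt_disable();	/* stay on one CPU for the whole retry loop */
	do {
		head = __this_cpu_read(demo_stack);
		node->next = head;
		/* No LOCK prefix needed: only this CPU writes demo_stack,
		 * so a CPU-local cmpxchg is enough to close the race against
		 * our own interrupt handlers pushing concurrently. */
	} while (this_cpu_cmpxchg(demo_stack, head, node) != head);
	preempt_enable();
}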
+4 -4
@@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-	__get_cpu_var(kprobe_instance) = kp;
+	__this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-	__get_cpu_var(kprobe_instance) = NULL;
+	__this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
@@ -965,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -980,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 	int ret = 0;
 
 	if (cur && cur->break_handler) {
+2 -2
@@ -364,8 +364,8 @@ void rcu_irq_exit(void)
 	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_sched_data).nxtlist ||
-	    __get_cpu_var(rcu_bh_data).nxtlist)
+	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+	    __this_cpu_read(rcu_bh_data.nxtlist))
 		set_need_resched();
 }
+21 -21
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
-	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
 	if (tsk && tsk->state != TASK_RUNNING)
 		wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_vec).tail = t;
-	__get_cpu_var(tasklet_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_vec.tail) = t;
+	__this_cpu_write(tasklet_vec.tail, &(t->next));
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_hi_vec).tail = t;
-	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_hi_vec.tail) = t;
+	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
 
-	t->next = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = t;
+	t->next = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_vec).head;
-	__get_cpu_var(tasklet_vec).head = NULL;
-	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+	list = __this_cpu_read(tasklet_vec.head);
+	__this_cpu_write(tasklet_vec.head, NULL);
+	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_vec).tail = t;
-		__get_cpu_var(tasklet_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_vec.tail) = t;
+		__this_cpu_write(tasklet_vec.tail, &(t->next));
 		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = NULL;
-	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+	list = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, NULL);
+	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_hi_vec).tail = t;
-		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_hi_vec.tail) = t;
+		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 		__raise_softirq_irqoff(HI_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
 	/* Find end, append list for that CPU. */
 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 		per_cpu(tasklet_vec, cpu).head = NULL;
 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 	}
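Note the split the tasklet hunks introduce: storing through the tail pointer stays a plain dereference of __this_cpu_read(tasklet_vec.tail), while advancing the tail itself becomes a __this_cpu_write(). A sketch of that tail-queue append on hypothetical names (demo_vec is not a symbol from this tree; like tasklet_vec, its tail must be initialized to point at head):

#include <linux/percpu.h>

struct demo_item {
	struct demo_item *next;
};

struct demo_list {
	struct demo_item *head;
	struct demo_item **tail;	/* last ->next, or &head when empty */
};

static DEFINE_PER_CPU(struct demo_list, demo_vec);	/* hypothetical */

/* Caller must have interrupts disabled, as the tasklet paths do. */
static void demo_append(struct demo_item *t)
{
	t->next = NULL;
	*__this_cpu_read(demo_vec.tail) = t;		/* link after the last item */
	__this_cpu_write(demo_vec.tail, &t->next);	/* advance the tail pointer */
}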
+2 -3
@@ -89,8 +89,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
 		return -ENOMEM;
 
 	if (!info) {
-		int seq = get_cpu_var(taskstats_seqnum)++;
-		put_cpu_var(taskstats_seqnum);
+		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;
 
 		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
 	} else
@@ -612,7 +611,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = &__raw_get_cpu_var(listener_array);
+	listeners = __this_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
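The taskstats hunk illustrates the this_cpu_inc_return() conversions listed in the merge: the old code post-increments (seq gets the value before the increment), and this_cpu_inc_return() yields the value after it, hence the "- 1" to preserve the sequence numbering. Shown on a hypothetical per-cpu counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_seq);	/* hypothetical */

static int old_way(void)
{
	int seq = get_cpu_var(demo_seq)++;	/* value before the increment */
	put_cpu_var(demo_seq);			/* re-enables preemption */
	return seq;
}

static int new_way(void)
{
	/* inc_return gives the value after the increment; subtracting 1
	 * restores the old post-increment result without the explicit
	 * preemption dance. */
	return this_cpu_inc_return(demo_seq) - 1;
}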
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
 	int ret;
 
 	local_irq_save(flags);
-	ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+	ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
 	local_irq_restore(flags);
 
 	return ret;
+18 -18
@@ -118,12 +118,12 @@ static void __touch_watchdog(void)
 {
 	int this_cpu = smp_processor_id();
 
-	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -167,12 +167,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return 0;
 }
 #endif
@@ -205,8 +205,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (__get_cpu_var(watchdog_nmi_touch) == true) {
-		__get_cpu_var(watchdog_nmi_touch) = false;
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
 	}
@@ -220,7 +220,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		int this_cpu = smp_processor_id();
 
 		/* only print hardlockups once */
-		if (__get_cpu_var(hard_watchdog_warn) == true)
+		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
 		if (hardlockup_panic)
@@ -228,16 +228,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-		__get_cpu_var(hard_watchdog_warn) = true;
+		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
 
-	__get_cpu_var(hard_watchdog_warn) = false;
+	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
 static void watchdog_interrupt_count(void)
 {
-	__get_cpu_var(hrtimer_interrupts)++;
+	__this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -246,7 +246,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
@@ -254,18 +254,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	watchdog_interrupt_count();
 
 	/* kick the softlockup detector */
-	wake_up_process(__get_cpu_var(softlockup_watchdog));
+	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
-			__get_cpu_var(softlockup_touch_sync) = false;
+			__this_cpu_write(softlockup_touch_sync, false);
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -281,7 +281,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
-		if (__get_cpu_var(soft_watchdog_warn) == true)
+		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -296,9 +296,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
-		__get_cpu_var(soft_watchdog_warn) = true;
+		__this_cpu_write(soft_watchdog_warn, true);
 	} else
-		__get_cpu_var(soft_watchdog_warn) = false;
+		__this_cpu_write(soft_watchdog_warn, false);
 
 	return HRTIMER_RESTART;
 }