#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
}
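
/*
 * Illustrative sketch, not kernel API: because preempt_count() masks off the
 * MSB, a raw per-thread value of (PREEMPT_NEED_RESCHED | 1) still reads back
 * as a count of 1, so existing "non-zero means we cannot preempt" tests are
 * unaffected by the flag bit:
 *
 *	preempt_count_set(PREEMPT_NEED_RESCHED | 1);	(helper defined below)
 *	WARN_ON(preempt_count() != 1);
 */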

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

/*
 * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
 * alternative is losing a reschedule. Better to schedule too often -- also
 * this should be a very rare operation.
 */
static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}

static __always_inline void clear_preempt_need_resched(void)
{
	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}
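
/*
 * Minimal sketch of the inverted-bit trick (illustration only, helpers are
 * defined below): starting from a raw value of PREEMPT_NEED_RESCHED (bit set
 * means no reschedule needed, count is 0), a disable/mark/enable sequence
 * drives the raw word to exactly 0, so a single "decrement and test against
 * zero" observes both "count reached zero" and "reschedule pending" at once:
 *
 *	preempt_count_set(PREEMPT_NEED_RESCHED);	(count 0, no resched)
 *	add_preempt_count(1);				(raw 0x80000001)
 *	set_preempt_need_resched();			(clears MSB: raw 0x1)
 *	sub_preempt_count(1);				(raw 0 -> resched now)
 */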

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()			do { } while (0)
#define preempt_check_resched_context()		do { } while (0)

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)
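
/*
 * Typical usage, as a sketch (my_percpu_counter is a hypothetical per-CPU
 * variable, not something defined here): the disabled region cannot be
 * preempted or migrated off this CPU, and the final preempt_enable()
 * re-checks whether a reschedule became pending while preemption was off:
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_percpu_counter);
 *	preempt_enable();
 */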

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
	do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	preempt_check_resched_context(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling to migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *	notifier: struct preempt_notifier for the task being scheduled
 *	cpu: cpu we're scheduled on
 * @sched_out: we've just been preempted
 *	notifier: struct preempt_notifier for the task being preempted
 *	next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts. sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
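
/*
 * Usage sketch (illustrative only; "my_ctx", my_sched_in and my_sched_out
 * are hypothetical names): embed the preempt_notifier in your own structure,
 * recover the container with container_of() in the callbacks, and register
 * it for the current task:
 *
 *	struct my_ctx {
 *		struct preempt_notifier notifier;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);
 *		(refresh ctx state on this cpu; my_sched_out is analogous)
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&ctx->notifier, &my_ops);
 *	preempt_notifier_register(&ctx->notifier);
 */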

#endif

#endif /* __LINUX_PREEMPT_H */