sched/wait: Rename wait_queue_t => wait_queue_entry_t
Rename: wait_queue_t => wait_queue_entry_t

'wait_queue_t' was always a slight misnomer: its name implies that it's
a "queue", but in reality it's a queue *entry*. The 'real' queue is
the wait queue head, which had to carry the name.

Start sorting this out by renaming it to 'wait_queue_entry_t'.

This also allows the real structure name 'struct __wait_queue' to
lose its double underscore and become 'struct wait_queue_entry',
which is the more canonical nomenclature for such data types.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
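For orientation, a minimal sketch of the shape of the renamed type, reconstructed from the usage visible in the hunks below (wait->private, wait->task_list, wake callbacks); the exact field layout in <linux/wait.h> is not part of this diff and should be treated as illustrative:

    /* Sketch only: the entry type after the rename. A wait *queue* is a
     * wait_queue_head_t; each waiter links one of these entries into it. */
    struct wait_queue_entry {
        unsigned int       flags;
        void               *private;    /* typically the waiting task */
        wait_queue_func_t  func;        /* callback invoked on wake-up */
        struct list_head   task_list;   /* linkage in the head's list */
    };
    typedef struct wait_queue_entry wait_queue_entry_t;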
@@ -819,7 +819,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
 certain condition is true. They must be used carefully to ensure
 there is no race condition. You declare a
 <type>wait_queue_head_t</type>, and then processes which want to
-wait for that condition declare a <type>wait_queue_t</type>
+wait for that condition declare a <type>wait_queue_entry_t</type>
 referring to themselves, and place that in the queue.
 </para>
 
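The documentation above compresses the usual calling sequence; here is a minimal sketch of it in post-rename terms, assuming a hypothetical head `my_event` and flag `done` (none of this code is from the commit, but every call it uses is the long-standing wait-queue API):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_event);  /* the queue: a wait_queue_head_t */
    static int done;                           /* hypothetical condition flag */

    static void wait_for_done(void)
    {
        wait_queue_entry_t wait;               /* the entry: one per waiting process */

        init_waitqueue_entry(&wait, current);  /* entry refers to this task */
        add_wait_queue(&my_event, &wait);      /* place it in the queue */
        for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (done)                          /* re-check after queueing: no race */
                break;
            schedule();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&my_event, &wait);
    }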
@@ -316,7 +316,7 @@ For version 5, the format of the message is:
     struct autofs_v5_packet {
         int proto_version;              /* Protocol version */
         int type;                       /* Type of packet */
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         __u32 dev;
         __u64 ino;
         __u32 uid;
@@ -341,12 +341,12 @@ The pipe will be set to "packet mode" (equivalent to passing
 `O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at
 most one packet, and any unread portion of a packet will be discarded.
 
-The `wait_queue_token` is a unique number which can identify a
+The `wait_queue_entry_token` is a unique number which can identify a
 particular request to be acknowledged. When a message is sent over
 the pipe the affected dentry is marked as either "active" or
 "expiring" and other accesses to it block until the message is
 acknowledged using one of the ioctls below and the relevant
-`wait_queue_token`.
+`wait_queue_entry_token`.
 
 Communicating with autofs: root directory ioctls
 ------------------------------------------------
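On the daemon side this token handling reduces to a short read-then-acknowledge loop. A hedged userspace sketch of the read half, using the packet struct from the hunk above (`pipefd` and `handle_request` are hypothetical names, and the field name follows the rename in this diff):

    #include <unistd.h>
    #include <linux/auto_fs4.h>

    /* Sketch only: packet mode guarantees at most one packet per read(). */
    struct autofs_v5_packet pkt;
    ssize_t n = read(pipefd, &pkt, sizeof(pkt));
    if (n == sizeof(pkt))
        handle_request(&pkt);  /* pkt.wait_queue_entry_token identifies the request */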
@@ -358,7 +358,7 @@ capability, or must be the automount daemon.
 The available ioctl commands are:
 
 - **AUTOFS_IOC_READY**: a notification has been handled. The argument
-  to the ioctl command is the "wait_queue_token" number
+  to the ioctl command is the "wait_queue_entry_token" number
   corresponding to the notification being acknowledged.
- **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with
  the error code `ENOENT`.
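And the matching acknowledgment step that the two ioctl descriptions above specify, as a hedged sketch (assuming `ioctlfd` is an open descriptor on the autofs root and `ok` is the daemon's verdict; `pkt` is the packet read earlier):

    #include <sys/ioctl.h>
    #include <linux/auto_fs4.h>

    /* Sketch only: hand the token back through one of the two ioctls. */
    if (ok)
        ioctl(ioctlfd, AUTOFS_IOC_READY, pkt.wait_queue_entry_token);
    else
        ioctl(ioctlfd, AUTOFS_IOC_FAIL, pkt.wait_queue_entry_token);  /* waiter gets ENOENT */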
@@ -382,14 +382,14 @@ The available ioctl commands are:
     struct autofs_packet_expire_multi {
         int proto_version;              /* Protocol version */
         int type;                       /* Type of packet */
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         int len;
         char name[NAME_MAX+1];
     };
 
   is required. This is filled in with the name of something
   that can be unmounted or removed. If nothing can be expired,
-  `errno` is set to `EAGAIN`. Even though a `wait_queue_token`
+  `errno` is set to `EAGAIN`. Even though a `wait_queue_entry_token`
   is present in the structure, no "wait queue" is established
   and no acknowledgment is needed.
- **AUTOFS_IOC_EXPIRE_MULTI**: This is similar to
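Since the expire path needs no acknowledgment, a daemon can simply poll until the kernel reports EAGAIN. A hedged sketch of that loop (`ioctlfd` and `try_umount` are hypothetical, and the exact return convention should be checked against the full autofs document):

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/auto_fs4.h>

    /* Sketch only: drain expirable names; the kernel fills in pkt.name. */
    struct autofs_packet_expire_multi pkt;
    while (ioctl(ioctlfd, AUTOFS_IOC_EXPIRE, &pkt) == 0)
        try_umount(pkt.name);  /* hypothetical helper; no ack round-trip needed */
    /* the loop ends with errno == EAGAIN once nothing more can be expired */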
@@ -926,7 +926,7 @@ static bool reorder_tags_to_front(struct list_head *list)
 	return first != NULL;
 }
 
-static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 				void *key)
 {
 	struct blk_mq_hw_ctx *hctx;
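blk_mq_dispatch_wake above is one of many custom wake callbacks this rename touches; the first parameter of all of them changes to the entry type. The general pattern, as a minimal sketch (`my_wake` is illustrative, not from the diff; the real callbacks stash different things in ->private and do their own bookkeeping):

    /* Sketch only: a wake callback runs when the wait queue head is woken,
     * receiving the entry itself, now typed wait_queue_entry_t. */
    static int my_wake(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
    {
        list_del_init(&wait->task_list);        /* unlink from the head's list */
        return wake_up_process(wait->private);  /* ->private holds the sleeping task here */
    }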
@@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 }
 
 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
-			     wait_queue_t *wait, unsigned long rw)
+			     wait_queue_entry_t *wait, unsigned long rw)
 {
 	/*
 	 * inc it here even if disabled, since we'll dec it at completion.
@@ -99,7 +99,7 @@ struct kyber_hctx_data {
 	struct list_head rqs[KYBER_NUM_DOMAINS];
 	unsigned int cur_domain;
 	unsigned int batching;
-	wait_queue_t domain_wait[KYBER_NUM_DOMAINS];
+	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
@@ -507,7 +507,7 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
 	}
 }
 
-static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 			     void *key)
 {
 	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
@@ -523,7 +523,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
 	unsigned int sched_domain = khd->cur_domain;
 	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-	wait_queue_t *wait = &khd->domain_wait[sched_domain];
+	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
 	struct sbq_wait_state *ws;
 	int nr;
 
@@ -734,7 +734,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
 {									\
 	struct blk_mq_hw_ctx *hctx = data;				\
 	struct kyber_hctx_data *khd = hctx->sched_data;			\
-	wait_queue_t *wait = &khd->domain_wait[domain];			\
+	wait_queue_entry_t *wait = &khd->domain_wait[domain];		\
 									\
 	seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list));	\
 	return 0;							\
@@ -602,7 +602,7 @@ static int btmrvl_service_main_thread(void *data)
 	struct btmrvl_thread *thread = data;
 	struct btmrvl_private *priv = thread->priv;
 	struct btmrvl_adapter *adapter = priv->adapter;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct sk_buff *skb;
 	ulong flags;
 
@@ -821,7 +821,7 @@ static ssize_t ipmi_read(struct file *file,
 			 loff_t *ppos)
 {
 	int rv = 0;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (count <= 0)
 		return 0;
@@ -123,7 +123,7 @@ struct drm_i915_gem_request {
 	 * It is used by the driver to then queue the request for execution.
 	 */
 	struct i915_sw_fence submit;
-	wait_queue_t submitq;
+	wait_queue_entry_t submitq;
 	wait_queue_head_t execute;
 
 	/* A list of everyone we wait upon, and everyone who waits upon us.
@@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 					struct list_head *continuation)
 {
 	wait_queue_head_t *x = &fence->wait;
-	wait_queue_t *pos, *next;
+	wait_queue_entry_t *pos, *next;
 	unsigned long flags;
 
 	debug_fence_deactivate(fence);
@@ -254,7 +254,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
 	__i915_sw_fence_commit(fence);
 }
 
-static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
 {
 	list_del(&wq->task_list);
 	__i915_sw_fence_complete(wq->private, key);
@@ -267,7 +267,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
 static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 				    const struct i915_sw_fence * const signaler)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 
 	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
 		return false;
@@ -288,7 +288,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 
 static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 
 	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
 		return;
@@ -320,7 +320,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 
 static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 					  struct i915_sw_fence *signaler,
-					  wait_queue_t *wq, gfp_t gfp)
+					  wait_queue_entry_t *wq, gfp_t gfp)
 {
 	unsigned long flags;
 	int pending;
@@ -359,7 +359,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 
 	spin_lock_irqsave(&signaler->wait.lock, flags);
 	if (likely(!i915_sw_fence_done(signaler))) {
-		__add_wait_queue_tail(&signaler->wait, wq);
+		__add_wait_queue_entry_tail(&signaler->wait, wq);
 		pending = 1;
 	} else {
 		i915_sw_fence_wake(wq, 0, 0, NULL);
@@ -372,7 +372,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *signaler,
-				 wait_queue_t *wq)
+				 wait_queue_entry_t *wq)
 {
 	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
 }
@@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
 
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *after,
-				 wait_queue_t *wq);
+				 wait_queue_entry_t *wq);
 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 				     struct i915_sw_fence *after,
 				     gfp_t gfp);
@@ -375,7 +375,7 @@ struct radeon_fence {
 	unsigned		ring;
 	bool			is_vm_update;
 
-	wait_queue_t		fence_wake;
+	wait_queue_entry_t	fence_wake;
 };
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
 {
 	struct radeon_fence *fence;
 	u64 seq;
@@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
 	struct vga_device *vgadev, *conflict;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int rc = 0;
 
 	vga_check_first_use();
@@ -1939,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
 bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
 {
 	struct i40iw_device *iwdev;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	iwdev = dev->back_dev;
 
@@ -207,7 +207,7 @@ void bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	/* for waiting on btree reserve in btree_split() */
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	/* Btree level at which we start taking write locks */
 	short lock;
@@ -144,7 +144,7 @@ static inline int
 sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 {
 	int errno = 0;
-	wait_queue_t we;
+	wait_queue_entry_t we;
 
 	init_waitqueue_entry(&we, current);
 	add_wait_queue(wait_queue, &we);
@@ -171,7 +171,7 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue,
 		   int *condition,
 		   int timeout)
 {
-	wait_queue_t we;
+	wait_queue_entry_t we;
 
 	init_waitqueue_entry(&we, current);
 	add_wait_queue(wait_queue, &we);
@@ -3066,7 +3066,7 @@ static int airo_thread(void *data) {
 		if (ai->jobs) {
 			locked = down_interruptible(&ai->sem);
 		} else {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 
 			init_waitqueue_entry(&wait, current);
 			add_wait_queue(&ai->thr_wait, &wait);
@@ -2544,7 +2544,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
 		ret = -EINVAL;
 	}
 	if (local->iw_mode == IW_MODE_MASTER) {
-		wait_queue_t __wait;
+		wait_queue_entry_t __wait;
 		init_waitqueue_entry(&__wait, current);
 		add_wait_queue(&local->hostscan_wq, &__wait);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -453,7 +453,7 @@ static int lbs_thread(void *data)
 {
 	struct net_device *dev = data;
 	struct lbs_private *priv = dev->ml_priv;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	lbs_deb_enter(LBS_DEB_THREAD);
 
@@ -48,7 +48,7 @@
 #include <linux/wait.h>
 typedef wait_queue_head_t adpt_wait_queue_head_t;
 #define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait)    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_t adpt_wait_queue_t;
+typedef wait_queue_entry_t adpt_wait_queue_entry_t;
 
 /*
 * message structures
Some files were not shown because too many files have changed in this diff.