mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'v6.7-rc8' into locking/core, to pick up dependent changes
Pick up these commits from Linus's tree:

  b106bcf0f9 ("locking/osq_lock: Clarify osq_wait_next()")
  563adbfc35 ("locking/osq_lock: Clarify osq_wait_next() calling convention")
  7c22309821 ("locking/osq_lock: Move the definition of optimistic_spin_node into osq_lock.c")

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
@@ -3497,7 +3497,8 @@ static int alloc_chain_hlocks(int req)
|
||||
size = chain_block_size(curr);
|
||||
if (likely(size >= req)) {
|
||||
del_chain_block(0, size, chain_block_next(curr));
|
||||
add_chain_block(curr + req, size - req);
|
||||
if (size > req)
|
||||
add_chain_block(curr + req, size - req);
|
||||
return curr;
|
||||
}
|
||||
}
|
||||
|
||||
+20
-17
@@ -11,6 +11,13 @@
|
||||
* called from interrupt context and we have preemption disabled while
|
||||
* spinning.
|
||||
*/
|
||||
|
||||
struct optimistic_spin_node {
|
||||
struct optimistic_spin_node *next, *prev;
|
||||
int locked; /* 1 if lock acquired */
|
||||
int cpu; /* encoded CPU # + 1 value */
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
|
||||
|
||||
/*
|
||||
@@ -37,32 +44,28 @@ static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
|
||||
/*
|
||||
* Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
|
||||
* Can return NULL in case we were the last queued and we updated @lock instead.
|
||||
*
|
||||
* If osq_lock() is being cancelled there must be a previous node
|
||||
* and 'old_cpu' is its CPU #.
|
||||
* For osq_unlock() there is never a previous node and old_cpu is
|
||||
* set to OSQ_UNLOCKED_VAL.
|
||||
*/
|
||||
static inline struct optimistic_spin_node *
|
||||
osq_wait_next(struct optimistic_spin_queue *lock,
|
||||
struct optimistic_spin_node *node,
|
||||
struct optimistic_spin_node *prev)
|
||||
int old_cpu)
|
||||
{
|
||||
struct optimistic_spin_node *next = NULL;
|
||||
int curr = encode_cpu(smp_processor_id());
|
||||
int old;
|
||||
|
||||
/*
|
||||
* If there is a prev node in queue, then the 'old' value will be
|
||||
* the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
|
||||
* we're currently last in queue, then the queue will then become empty.
|
||||
*/
|
||||
old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
|
||||
|
||||
for (;;) {
|
||||
if (atomic_read(&lock->tail) == curr &&
|
||||
atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
|
||||
atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr) {
|
||||
/*
|
||||
* We were the last queued, we moved @lock back. @prev
|
||||
* will now observe @lock and will complete its
|
||||
* unlock()/unqueue().
|
||||
*/
|
||||
break;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -76,15 +79,15 @@ osq_wait_next(struct optimistic_spin_queue *lock,
|
||||
* wait for a new @node->next from its Step-C.
|
||||
*/
|
||||
if (node->next) {
|
||||
struct optimistic_spin_node *next;
|
||||
|
||||
next = xchg(&node->next, NULL);
|
||||
if (next)
|
||||
break;
|
||||
return next;
|
||||
}
|
||||
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
bool osq_lock(struct optimistic_spin_queue *lock)
|
||||
@@ -186,7 +189,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
|
||||
* back to @prev.
|
||||
*/
|
||||
|
||||
next = osq_wait_next(lock, node, prev);
|
||||
next = osq_wait_next(lock, node, prev->cpu);
|
||||
if (!next)
|
||||
return false;
|
||||
|
||||
@@ -226,7 +229,7 @@ void osq_unlock(struct optimistic_spin_queue *lock)
|
||||
return;
|
||||
}
|
||||
|
||||
next = osq_wait_next(lock, node, NULL);
|
||||
next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
|
||||
if (next)
|
||||
WRITE_ONCE(next->locked, 1);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user