Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C implementation is better for maintainability, debuggability and extensibility. Thanks to Peter Zijlstra for fixing the lockdep warning. Thanks to Harvey Harrison for pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
commit 64ac24e738
parent e48b3deee4
committed by Matthew Wilcox
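For context: the generic replacement lands in kernel/semaphore.c, which this truncated diff does not show. A minimal user-space sketch of the down()/up() semantics it provides, using POSIX primitives in place of the kernel's raw spinlock and waitqueue (sem_down/sem_up are illustrative names, not the kernel API):

    #include <pthread.h>

    /* Illustrative model only: a counting semaphore with down()/up()
     * semantics.  The kernel version protects the count with a spinlock
     * and keeps sleeping tasks on a wait-list instead of a condvar. */
    struct sem {
            pthread_mutex_t lock;
            pthread_cond_t  wait;
            int             count;
    };

    #define SEM_INIT(n) { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, (n) }

    static void sem_down(struct sem *s)
    {
            pthread_mutex_lock(&s->lock);
            while (s->count <= 0)                   /* no token: sleep */
                    pthread_cond_wait(&s->wait, &s->lock);
            s->count--;                             /* take a token */
            pthread_mutex_unlock(&s->lock);
    }

    static void sem_up(struct sem *s)
    {
            pthread_mutex_lock(&s->lock);
            s->count++;                             /* return a token */
            pthread_cond_signal(&s->wait);          /* cf. wake_up() */
            pthread_mutex_unlock(&s->lock);
    }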
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-	    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+	    irq_alpha.o signal.o setup.o ptrace.o time.o \
 	    alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /*
  * SMP-specific symbols.
  */
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	long old_count, tmp = 0;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%0,%2\n"
-	"	cmovgt	%0,%0,%1\n"
-	"	addl	%1,%3,%1\n"
-	"	stl_c	%1,%2\n"
-	"	beq	%1,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-	return old_count;
-}
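A portable C11 rendering of the ldl_l/stl_c loop above, for readers who don't speak Alpha assembly (a sketch only; the real routine also issues an mb barrier on the success path):

    #include <stdatomic.h>

    /* Sketch of __sem_update_count() in C11 atomics:
     * store max(old, 0) + incr, return the old count. */
    static int sem_update_count(_Atomic int *count, int incr)
    {
            int old = atomic_load_explicit(count, memory_order_relaxed);
            int new;

            do {
                    new = (old > 0 ? old : 0) + incr;
            } while (!atomic_compare_exchange_weak_explicit(count, &old, new,
                            memory_order_acq_rel, memory_order_relaxed));
            return old;
    }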
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down acquired(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			ret = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down %s(%p)\n",
-	       current->comm, task_pid_nr(current),
-	       (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-	return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down_trylock %s from %p\n",
-	       current->comm, task_pid_nr(current),
-	       ret ? "failed" : "acquired",
-	       __builtin_return_address(0));
-#endif
-
-	return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): up(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__up(sem);
-}
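As the comments above spell out, the two entry points differ only in whether a pending signal can abort the wait. A typical caller looks roughly like this (sketch; my_sem and do_work() are hypothetical placeholders):

    if (down_interruptible(&my_sem))
            return -EINTR;          /* signalled: semaphore NOT held */
    do_work();                      /* semaphore held here */
    up(&my_sem);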
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o semaphore.o setup.o signal.o \
+		   process.o ptrace.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
@@ -1,221 +0,0 @@
-/*
- *  ARM semaphore implementation, taken from
- *
- *  i386 semaphore implementation.
- *
- *  (C) Copyright 1999 Linus Torvalds
- *
- *  Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
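The count/sleepers interplay is easier to follow outside the kernel. Below is a user-space model of this scheme (illustrative only: a mutex/condvar stands in for the waitqueue and semaphore_lock, and old_sem/old_down/old_up are made-up names; only the bookkeeping mirrors the code that follows):

    #include <pthread.h>
    #include <stdatomic.h>

    struct old_sem {
            _Atomic int     count;          /* fast-path counter, may go negative */
            int             sleepers;       /* protected by lock */
            pthread_mutex_t lock;
            pthread_cond_t  wait;
    };

    static int add_negative(_Atomic int *v, int i)
    {
            return atomic_fetch_add(v, i) + i < 0; /* did the result go negative? */
    }

    static void old_down(struct old_sem *sem)
    {
            if (atomic_fetch_sub(&sem->count, 1) > 0)
                    return;                         /* fast path: token taken */

            pthread_mutex_lock(&sem->lock);
            sem->sleepers++;
            for (;;) {
                    int sleepers = sem->sleepers;

                    /* Fold everybody else's decrement back into count;
                     * the "- 1" keeps our own claim pending. */
                    if (!add_negative(&sem->count, sleepers - 1)) {
                            sem->sleepers = 0;
                            break;                  /* acquired */
                    }
                    sem->sleepers = 1;              /* us - see -1 above */
                    pthread_cond_wait(&sem->wait, &sem->lock);
            }
            /* cf. the trailing wake_up(): let the next sleeper re-check */
            pthread_cond_signal(&sem->wait);
            pthread_mutex_unlock(&sem->lock);
    }

    static void old_up(struct old_sem *sem)
    {
            if (atomic_fetch_add(&sem->count, 1) < 0) {     /* sleepers exist */
                    pthread_mutex_lock(&sem->lock);
                    pthread_cond_signal(&sem->wait);
                    pthread_mutex_unlock(&sem->lock);
            }
    }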
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
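The single-cmpxchg trylock that the comment above rules out for the 386 would look roughly like this in C11 atomics (sketch; note it returns 0 on success and 1 on failure, matching __down_trylock's convention):

    #include <stdatomic.h>

    /* Hypothetical cmpxchg-based trylock: consume a token only if one
     * is available, so count never goes negative on failure. */
    static int down_trylock_cmpxchg(_Atomic int *count)
    {
            int old = atomic_load(count);

            while (old > 0) {
                    if (atomic_compare_exchange_weak(count, &old, old - 1))
                            return 0;       /* acquired */
            }
            return 1;                       /* contended: no token taken */
    }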
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("	.section .sched.text,\"ax\",%progbits	\n\
-	.align	5				\n\
-	.globl	__down_failed			\n\
-__down_failed:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_interruptible_failed	\n\
-__down_interruptible_failed:			\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_interruptible		\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_trylock_failed		\n\
-__down_trylock_failed:				\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_trylock			\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__up_wakeup			\n\
-__up_wakeup:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__up				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-	");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
 
 obj-$(CONFIG_SUBARCH_AVR32B)	+= entry-avr32b.o
 obj-y				+= syscall_table.o syscall-stubs.o irq.o
-obj-y				+= setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y				+= setup.o traps.o ocd.o ptrace.o
 obj-y				+= signal.o sys_avr32.o process.o time.o
 obj-y				+= init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES)		+= module.o avr32_ksyms.o
@@ -1,148 +0,0 @@
-/*
- * AVR32 semaphore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- * Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into the trylock
-		 * failure case - we won't be sleeping, and we can't
-		 * get the lock as it has contention. Just correct the
-		 * count and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
@@ -5,8 +5,7 @@
 
 extra-y	:= vmlinux.lds
 
-obj-y   := process.o traps.o irq.o ptrace.o setup.o \
-	   time.o sys_cris.o semaphore.o
+obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES)    += crisksyms.o
 obj-$(CONFIG_MODULES)    += module.o
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
 
@@ -1,129 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
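waking_non_zero() and its relatives come from the per-arch <asm/semaphore-helper.h>; their contract is "atomically consume one waking token, if up() has granted any". A C11 sketch of that contract (illustrative, not the actual helper, which used arch-specific atomics or a spinlock):

    #include <stdatomic.h>

    /* Sketch of the waking_non_zero() contract: returns 1 if we consumed
     * a wakeup token and may gate through, 0 if we must sleep again. */
    static int waking_non_zero(_Atomic int *waking)
    {
            int old = atomic_load(waking);

            while (old > 0) {
                    if (atomic_compare_exchange_weak(waking, &old, old - 1))
                            return 1;
            }
            return 0;
    }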
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)					\
-								\
-								\
-	tsk->state = (task_state);				\
-	add_wait_queue(&sem->wait, &wait);			\
-								\
-	/*							\
-	 * Ok, we're set up.  sem->count is known to be less	\
-	 * than zero so we must wait.				\
-	 *							\
-	 * We can let go the lock for purposes of waiting.	\
-	 * We re-acquire it after awaking so as to protect	\
-	 * all semaphore operations.				\
-	 *							\
-	 * If "up()" is called before we call waking_non_zero()\
-	 * then we will catch it right away.  If it is called	\
-	 * later then we will have to go through a wakeup cycle	\
-	 * to catch it.						\
-	 *							\
-	 * Multiple waiters contend for the semaphore lock to	\
-	 * see who gets to gate through and who has to wait	\
-	 * some more.						\
-	 */							\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
	 kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-	 sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+	 sys_frv.o time.o setup.o frv_ksyms.o \
	 debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB)		+= gdb-stub.o gdb-io.o
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid,
-		       str,
-		       sem->counter,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-
-	semtrace(sem, "Entering __down");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-	int ret;
-
-	semtrace(sem,"Entering __down_interruptible");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	ret = 0;
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		if (unlikely(signal_pending(current)))
-			goto interrupted;
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
- out:
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down_interruptible");
-	return ret;
-
- interrupted:
-	spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (!list_empty(&waiter.list)) {
-		list_del(&waiter.list);
-		ret = -EINTR;
-	}
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	if (ret == -EINTR)
-		put_task_struct(current);
-	goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-	struct task_struct *tsk;
-	struct sem_waiter *waiter;
-
-	semtrace(sem,"Entering __up");
-
-	/* grant the token to the process at the front of the queue */
-	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
-	 * It is allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del_init(&waiter->list);
-	tsk = waiter->task;
-	mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
-
-	semtrace(sem,"Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
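Unlike the counter-based schemes above, FR-V hands the token directly to the head waiter, so woken tasks never re-contend. A user-space sketch of that FIFO handoff (illustrative; fifo_sem and friends are made-up names, and the kernel version uses a stack-allocated waiter woken via wake_up_process() rather than a per-waiter condvar):

    #include <pthread.h>

    /* Each waiter enqueues a node on its own stack; the releaser pops the
     * head and flips its "granted" flag, passing ownership directly. */
    struct waiter {
            struct waiter  *next;
            int             granted;        /* set by the releaser */
            pthread_cond_t  wake;
    };

    struct fifo_sem {
            pthread_mutex_t lock;
            int             count;          /* tokens available */
            struct waiter  *head, *tail;
    };

    static void fifo_down(struct fifo_sem *s)
    {
            struct waiter w = { NULL, 0, PTHREAD_COND_INITIALIZER };

            pthread_mutex_lock(&s->lock);
            if (s->count > 0) {
                    s->count--;                     /* uncontended: take a token */
            } else {
                    if (s->tail)                    /* enqueue at the tail */
                            s->tail->next = &w;
                    else
                            s->head = &w;
                    s->tail = &w;
                    while (!w.granted)              /* wait for direct handoff */
                            pthread_cond_wait(&w.wake, &s->lock);
            }
            pthread_mutex_unlock(&s->lock);
    }

    static void fifo_up(struct fifo_sem *s)
    {
            pthread_mutex_lock(&s->lock);
            if (s->head) {
                    struct waiter *w = s->head;     /* grant to the head waiter */
                    s->head = w->next;
                    if (!s->head)
                            s->tail = NULL;
                    w->granted = 1;
                    pthread_cond_signal(&w->wake);
            } else {
                    s->count++;
            }
            pthread_mutex_unlock(&s->lock);
    }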
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-	 sys_h8300.o time.o semaphore.o signal.o \
+	 sys_h8300.o time.o signal.o \
	 setup.o gpio.o init_task.o syscalls.o \
	 entry.o
 
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
@@ -1,132 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)					\
-								\
-								\
-	current->state = (task_state);				\
-	add_wait_queue(&sem->wait, &wait);			\
-								\
-	/*							\
-	 * Ok, we're set up.  sem->count is known to be less	\
-	 * than zero so we must wait.				\
-	 *							\
-	 * We can let go the lock for purposes of waiting.	\
-	 * We re-acquire it after awaking so as to protect	\
-	 * all semaphore operations.				\
-	 *							\
-	 * If "up()" is called before we call waking_non_zero()\
-	 * then we will catch it right away.  If it is called	\
-	 * later then we will have to go through a wakeup cycle	\
-	 * to catch it.						\
-	 *							\
-	 * Multiple waiters contend for the semaphore lock to	\
-	 * see who gets to gate through and who has to wait	\
-	 * some more.						\
-	 */							\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		current->state = (task_state);	\
-	}					\
-	current->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	 unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
(Some files were not shown because too many files have changed in this diff.)