linux-rockchip — mirror of https://github.com/armbian/linux-rockchip.git
Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:

 - fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)

 - optimize out non-atomic bitops on compile-time constants (Alexander
   Lobakin)

 - cleanup bitmap-related headers (Yury Norov)

 - x86/olpc: fix 'logical not is only applied to the left hand side'
   (Alexander Lobakin)

 - lib/nodemask: inline wrappers around bitmap (Yury Norov)

* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
  lib/nodemask: inline next_node_in() and node_random()
  powerpc: drop dependency on <asm/machdep.h> in archrandom.h
  x86/olpc: fix 'logical not is only applied to the left hand side'
  lib/cpumask: move some one-line wrappers to header file
  headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
  headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
  headers/deps: mm: Optimize <linux/gfp.h> header dependencies
  lib/cpumask: move trivial wrappers around find_bit to the header
  lib/cpumask: change return types to unsigned where appropriate
  cpumask: change return types to bool where appropriate
  lib/bitmap: change type of bitmap_weight to unsigned long
  lib/bitmap: change return types to bool where appropriate
  arm: align find_bit declarations with generic kernel
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  lib/test_bitmap: test the tail after bitmap_to_arr64()
  lib/bitmap: fix off-by-one in bitmap_to_arr64()
  lib: test_bitmap: add compile-time optimization/evaluations assertions
  bitmap: don't assume compiler evaluates small mem*() builtins calls
  net/ice: fix initializing the bitmap in the switch code
  bitops: let optimize out non-atomic bitops on compile-time constants
  ...
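The headline optimization ("optimize out non-atomic bitops on compile-time constants") hinges on giving the compiler a code path with no volatile accesses and no inline asm, so a test on a constant bitmap can fold away entirely. A standalone sketch of the effect (plain userspace C, illustrative names only, not kernel code):

#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

/* Like the new const_test_bit(): no volatile, so constant folding is legal. */
static inline bool const_test_bit(unsigned long nr, const unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0;
}

static const unsigned long flags[1] = { BIT_MASK(3) };

int probe(void)
{
	/* With -O2 this compiles to "return 1;" -- no load, no test. */
	return const_test_bit(3, flags);
}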
Documentation/core-api/mm-api.rst
@@ -22,16 +22,16 @@ Memory Allocation Controls

 .. kernel-doc:: include/linux/gfp.h
    :internal:

-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Page mobility and placement hints

-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Watermark modifiers

-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Reclaim modifiers

-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Useful GFP flag combinations

 The Slab Cache
MAINTAINERS
@@ -3603,7 +3603,6 @@ F: lib/bitmap.c
 F: lib/cpumask.c
 F: lib/find_bit.c
 F: lib/find_bit_benchmark.c
-F: lib/nodemask.c
 F: lib/test_bitmap.c
 F: tools/include/linux/bitmap.h
 F: tools/include/linux/find.h
@@ -13136,6 +13135,7 @@ W: http://www.linux-mm.org
 T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
 F: include/linux/gfp.h
+F: include/linux/gfp_types.h
 F: include/linux/memory_hotplug.h
 F: include/linux/mm.h
 F: include/linux/mmzone.h
arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
 	smp_mb();
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }

 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }

-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
 	return __ffs(tmp) + ofs;
 }

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>

 #include <asm-generic/bitops/ext2-atomic-setbit.h>
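Several architectures in this diff gain the same new include. For context (paraphrased from the series; the file itself is not part of this truncated diff): <asm-generic/bitops/non-instrumented-non-atomic.h> simply aliases the generic middle-layer names to the arch_ implementations for builds where no KASAN/KCSAN instrumentation wrapper is wanted, roughly:

#define ___set_bit		arch___set_bit
#define ___clear_bit		arch___clear_bit
#define ___change_bit		arch___change_bit
#define ___test_and_set_bit	arch___test_and_set_bit
#define ___test_and_clear_bit	arch___test_and_clear_bit
#define ___test_and_change_bit	arch___test_and_change_bit
#define _test_bit		arch_test_bit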
arch/arm/include/asm/bitops.h
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);

 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);

 #ifndef CONFIG_SMP
 /*
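The arm hunk only widens these declarations to the generic find_bit() signatures. The contract explains the type: the functions return the index of the found bit, or `size` itself when nothing is found, so the return type has to match the unsigned long size argument. A simplified standalone version of that contract (not the kernel's optimized implementation):

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Returns the index of the first set bit, or `size` if none is set --
 * hence the unsigned long return type, matching the size parameter. */
static unsigned long my_find_first_bit(const unsigned long *p, unsigned long size)
{
	for (unsigned long i = 0; i < size; i++)
		if (p[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}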
arch/hexagon/include/asm/bitops.h
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_clear_bit(nr, addr);
 }

-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_set_bit(nr, addr);
 }

-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_change_bit(nr, addr);
 }

 /*  Apparently, at least some of these are allowed to be non-atomic  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }

-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }

-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }

-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	int retval;

@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
 	return retval;
 }

-#define test_bit(nr, addr) __test_bit(nr, addr)
-
 /*
  * ffz - find first zero in word.
  * @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }

 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>

 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
arch/ia64/include/asm/bitops.h
@@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
 }

 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
 }

 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
@@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
 }

 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
@@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit

 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
 }

 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }

 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 old, bit = (1 << (nr & 31));
 	__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
 	return (old & bit) != 0;
 }

-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)

 #ifdef __KERNEL__

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>

 #include <asm-generic/bitops/ext2-atomic-setbit.h>

arch/ia64/include/asm/processor.h
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
 {
 	unsigned int reg = vector / 64;
 	unsigned int bit = vector % 64;
-	u64 irr;
+	unsigned long irr;

 	switch (reg) {
 	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
arch/m68k/include/asm/bitops.h
@@ -65,8 +65,11 @@ static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
 				bfset_mem_set_bit(nr, vaddr))
 #endif

-#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)
-
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	set_bit(nr, addr);
+}

 static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -105,8 +108,11 @@ static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
 				bfclr_mem_clear_bit(nr, vaddr))
 #endif

-#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)
-
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	clear_bit(nr, addr);
+}

 static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
 {
@@ -145,14 +151,17 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 				bfchg_mem_change_bit(nr, vaddr))
 #endif

-#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
-
-
-static inline int test_bit(int nr, const volatile unsigned long *vaddr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+	change_bit(nr, addr);
 }

+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}

 static inline int bset_reg_test_and_set_bit(int nr,
 					    volatile unsigned long *vaddr)
@@ -201,8 +210,11 @@ static inline int bfset_mem_test_and_set_bit(int nr,
 				bfset_mem_test_and_set_bit(nr, vaddr))
 #endif

-#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_set_bit(nr, addr);
+}

 static inline int bclr_reg_test_and_clear_bit(int nr,
 					      volatile unsigned long *vaddr)
@@ -251,8 +263,11 @@ static inline int bfclr_mem_test_and_clear_bit(int nr,
 				bfclr_mem_test_and_clear_bit(nr, vaddr))
 #endif

-#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_clear_bit(nr, addr);
+}

 static inline int bchg_reg_test_and_change_bit(int nr,
 					       volatile unsigned long *vaddr)
@@ -301,8 +316,11 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
 				bfchg_mem_test_and_change_bit(nr, vaddr))
 #endif

-#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
-
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	return test_and_change_bit(nr, addr);
+}

 /*
  *	The true 68020 and more advanced processors support the "bfffo"
@@ -522,6 +540,7 @@ static inline unsigned long __fls(unsigned long x)
 #define clear_bit_unlock	clear_bit
 #define __clear_bit_unlock	clear_bit_unlock

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
arch/powerpc/include/asm/archrandom.h
@@ -2,19 +2,12 @@
 #ifndef _ASM_POWERPC_ARCHRANDOM_H
 #define _ASM_POWERPC_ARCHRANDOM_H

-#include <asm/machdep.h>
-
 static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 {
 	return 0;
 }

-static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
-{
-	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
-		return 1;
-	return 0;
-}
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);

 #ifdef CONFIG_PPC_POWERNV
 int pnv_get_random_long(unsigned long *v);

arch/powerpc/kernel/setup-common.c
@@ -171,6 +171,14 @@ EXPORT_SYMBOL_GPL(machine_power_off);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);

+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+	if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(arch_get_random_seed_longs);
+
 void machine_halt(void)
 {
 	machine_shutdown();
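The archrandom.h change is a standard header-untangling move: the inline body needed ppc_md from <asm/machdep.h>, so every includer of archrandom.h paid for machdep.h too. Declaring the function in the header and defining it once in a .c file that already includes the heavy header breaks that edge. A generic sketch of the pattern with hypothetical names (widget.h/widget.c/heavy.h are not real kernel files):

/* widget.h: a bare declaration needs no heavy includes */
unsigned long widget_get_seed(unsigned long *v);

/* widget.c: the only translation unit that pays for heavy.h */
#include "heavy.h"	/* hypothetical; defines the heavy_ops table below */

unsigned long widget_get_seed(unsigned long *v)
{
	return heavy_ops.get_seed ? heavy_ops.get_seed(v) : 0;
}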
arch/s390/include/asm/bitops.h
@@ -113,75 +113,76 @@ static inline bool arch_test_and_change_bit(unsigned long nr,
 	return old & mask;
 }

-static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);

-	*addr |= mask;
+	*p |= mask;
 }

-static inline void arch___clear_bit(unsigned long nr,
-				    volatile unsigned long *ptr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);

-	*addr &= ~mask;
+	*p &= ~mask;
 }

-static inline void arch___change_bit(unsigned long nr,
-				     volatile unsigned long *ptr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);

-	*addr ^= mask;
+	*p ^= mask;
 }

-static inline bool arch___test_and_set_bit(unsigned long nr,
-					   volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;

-	old = *addr;
-	*addr |= mask;
+	old = *p;
+	*p |= mask;
 	return old & mask;
 }

-static inline bool arch___test_and_clear_bit(unsigned long nr,
-					     volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;

-	old = *addr;
-	*addr &= ~mask;
+	old = *p;
+	*p &= ~mask;
 	return old & mask;
 }

-static inline bool arch___test_and_change_bit(unsigned long nr,
-					      volatile unsigned long *ptr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);
 	unsigned long old;

-	old = *addr;
-	*addr ^= mask;
+	old = *p;
+	*p ^= mask;
 	return old & mask;
 }

-static inline bool arch_test_bit(unsigned long nr,
-				 const volatile unsigned long *ptr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
-	const volatile unsigned long *addr = __bitops_word(nr, ptr);
+	const volatile unsigned long *p = __bitops_word(nr, addr);
 	unsigned long mask = __bitops_mask(nr);

-	return *addr & mask;
+	return *p & mask;
 }

 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
arch/sh/include/asm/bitops-op32.h
@@ -2,6 +2,8 @@
 #ifndef __ASM_SH_BITOPS_OP32_H
 #define __ASM_SH_BITOPS_OP32_H

+#include <linux/bits.h>
+
 /*
  * The bit modifying instructions on SH-2A are only capable of working
  * with a 3-bit immediate, which signifies the shift position for the bit
@@ -16,7 +18,8 @@
 #define BYTE_OFFSET(nr)		((nr) % BITS_PER_BYTE)
 #endif

-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 	}
 }

-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
 }

 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 }

 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 }

 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }

 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					    volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr,
 }

 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }

+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #endif /* __ASM_SH_BITOPS_OP32_H */
arch/sparc/include/asm/bitops_32.h
@@ -19,9 +19,9 @@
 #error only <linux/bitops.h> can be included directly
 #endif

-unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask);

 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
@@ -36,7 +36,7 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *add
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);

-	return ___set_bit(ADDR, mask) != 0;
+	return sp32___set_bit(ADDR, mask) != 0;
 }

 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
@@ -46,7 +46,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);

-	(void) ___set_bit(ADDR, mask);
+	(void) sp32___set_bit(ADDR, mask);
 }

 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -56,7 +56,7 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);

-	return ___clear_bit(ADDR, mask) != 0;
+	return sp32___clear_bit(ADDR, mask) != 0;
 }

 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -66,7 +66,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);

-	(void) ___clear_bit(ADDR, mask);
+	(void) sp32___clear_bit(ADDR, mask);
 }

 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -76,7 +76,7 @@ static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);

-	return ___change_bit(ADDR, mask) != 0;
+	return sp32___change_bit(ADDR, mask) != 0;
 }

 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
@@ -86,7 +86,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);

-	(void) ___change_bit(ADDR, mask);
+	(void) sp32___change_bit(ADDR, mask);
 }

 #include <asm-generic/bitops/non-atomic.h>
arch/sparc/lib/atomic32.c
@@ -120,7 +120,7 @@ void arch_atomic_set(atomic_t *v, int i)
 }
 EXPORT_SYMBOL(arch_atomic_set);

-unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;

@@ -131,9 +131,9 @@ unsigned long ___set_bit(unsigned long *addr, unsigned long mask)

 	return old & mask;
 }
-EXPORT_SYMBOL(___set_bit);
+EXPORT_SYMBOL(sp32___set_bit);

-unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;

@@ -144,9 +144,9 @@ unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)

 	return old & mask;
 }
-EXPORT_SYMBOL(___clear_bit);
+EXPORT_SYMBOL(sp32___clear_bit);

-unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
 {
 	unsigned long old, flags;

@@ -157,7 +157,7 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)

 	return old & mask;
 }
-EXPORT_SYMBOL(___change_bit);
+EXPORT_SYMBOL(sp32___change_bit);

 unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 {
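The sp32___ prefix is not cosmetic: with this series the generic layer takes over the plain triple-underscore names as its instrumentable middle layer, so sparc32's identically named private helpers had to move aside. The assumed shape of the collision (paraphrased; the generic side is not part of this truncated diff):

/* generic side, after this series (paraphrased): */
#define ___set_bit	arch___set_bit
/* sparc32 side, before the rename -- same identifier, a different thing: */
unsigned long ___set_bit(unsigned long *addr, unsigned long mask);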
arch/x86/include/asm/bitops.h
@@ -63,7 +63,7 @@ arch_set_bit(long nr, volatile unsigned long *addr)
 }

 static __always_inline void
-arch___set_bit(long nr, volatile unsigned long *addr)
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -89,7 +89,7 @@ arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 }

 static __always_inline void
-arch___clear_bit(long nr, volatile unsigned long *addr)
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -114,7 +114,7 @@ arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 }

 static __always_inline void
-arch___change_bit(long nr, volatile unsigned long *addr)
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
@@ -145,7 +145,7 @@ arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 }

 static __always_inline bool
-arch___test_and_set_bit(long nr, volatile unsigned long *addr)
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;

@@ -171,7 +171,7 @@ arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
  * this without also updating arch/x86/kernel/kvm.c
  */
 static __always_inline bool
-arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;

@@ -183,7 +183,7 @@ arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 }

 static __always_inline bool
-arch___test_and_change_bit(long nr, volatile unsigned long *addr)
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	bool oldbit;

@@ -219,10 +219,12 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 	return oldbit;
 }

-#define arch_test_bit(nr, addr)			\
-	(__builtin_constant_p((nr))		\
-	 ? constant_test_bit((nr), (addr))	\
-	 : variable_test_bit((nr), (addr)))
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+	return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
+					  variable_test_bit(nr, addr);
+}

 /**
  * __ffs - find first set bit in word
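The x86 hunk converts arch_test_bit() from a macro into a real function while keeping the constant/variable split. A standalone illustration of the dispatch idea (plain C stand-ins, not the kernel's asm):

#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static bool constant_path(unsigned long nr, const unsigned long *addr)
{
	/* plain C: folds to a constant when nr and *addr are known */
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static bool variable_path(unsigned long nr, const unsigned long *addr)
{
	/* the kernel emits a "bt" instruction here; plain C stands in */
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static inline bool my_test_bit(unsigned long nr, const unsigned long *addr)
{
	/* Inlining keeps __builtin_constant_p() aware of the caller's nr;
	 * that is why the kernel marks these helpers __always_inline. */
	return __builtin_constant_p(nr) ? constant_path(nr, addr)
					: variable_path(nr, addr);
}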
@@ -80,7 +80,7 @@ static void send_ebook_state(void)
 		return;
 	}

-	if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
+	if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
 		return; /* Nothing new to report. */

 	input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
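Why the olpc change matters: test_bit() now returns bool, so the `!!` on the left was redundant, and the unnormalized `state` on the right was the real hazard. A standalone demo of the two forms:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	bool bit = true;	/* stand-in for test_bit(SW_TABLET_MODE, ...) */
	int state = 2;		/* non-zero "tablet mode" report */

	/* Old form: !!bit == state  ->  1 == 2  ->  wrongly "changed". */
	assert((!!bit == state) == 0);
	/* New form: bit == !!state  ->  true == true  ->  no change. */
	assert((bit == !!state) == 1);
	return 0;
}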
drivers/iommu/intel/dmar.c
@@ -494,7 +494,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
 		if (drhd->reg_base_addr == rhsa->base_address) {
 			int node = pxm_to_node(rhsa->proximity_domain);

-			if (!node_online(node))
+			if (node != NUMA_NO_NODE && !node_online(node))
 				node = NUMA_NO_NODE;
 			drhd->iommu->node = node;
 			return 0;
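pxm_to_node() can return NUMA_NO_NODE (-1), and node_online() is ultimately a test_bit() on the node_states bitmap, so -1 becomes a huge unsigned bit index and reads out of bounds. A minimal sketch of the guard (simplified userspace stand-in, not the kernel helper):

#define NUMA_NO_NODE	(-1)
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static int my_node_online(int node, const unsigned long *node_mask)
{
	/* Guard first: (unsigned long)-1 would index far past the bitmap. */
	if (node == NUMA_NO_NODE)
		return 0;
	return (node_mask[node / BITS_PER_LONG] >> (node % BITS_PER_LONG)) & 1;
}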
drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4971,7 +4971,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
 	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
 	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

-	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);

 	/* For each profile we are going to associate the recipe with, add the
 	 * recipes that are associated with that profile. This will give us
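bitmap_set(map, 0, nbits) and bitmap_fill(map, nbits) both end with the first nbits bits set; the difference is that bitmap_fill() is free to write whole words (including the unused tail of the last word) and states the "initialize everything" intent directly. Simplified stand-ins for the two helpers (assumed semantics, not the kernel implementations):

#include <string.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void my_bitmap_fill(unsigned long *map, unsigned int nbits)
{
	/* whole-word fill; tail bits past nbits may end up set too */
	memset(map, 0xff, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}

static void my_bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
	/* sets exactly nbits bits, one by one (the kernel is smarter) */
	for (unsigned int i = start; i < start + nbits; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}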
drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,

 	field = min(
 		bitmap_weight(actv_ports.ports, dev->caps.num_ports),
-		dev->caps.num_ports);
+		(unsigned int) dev->caps.num_ports);
 	MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

 	size = dev->caps.function_caps;	/* set PF behaviours */
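The cast exists because the kernel's min() macro type-checks its operands, and the bitmap_weight() return-type change elsewhere in this series broke the match against the num_ports field. A simplified, standalone version of that classic pattern (GNU C; this mirrors the kernel macro only loosely):

#include <stdio.h>

/* Comparing the two pointers warns ("comparison of distinct pointer
 * types") whenever x and y differ in type -- exactly the warning the
 * added (unsigned int) cast in the hunk above silences. */
#define min(x, y) ({			\
	typeof(x) _x = (x);		\
	typeof(y) _y = (y);		\
	(void)(&_x == &_y);		\
	_x < _y ? _x : _y; })

int main(void)
{
	unsigned int a = 3;
	printf("%u\n", min(a, (unsigned int)7));	/* matching types: clean */
	return 0;
}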
include/asm-generic/bitops/generic-non-atomic.h (new file, 161 lines)
@@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H

#include <linux/bits.h>

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

/*
 * Generic definitions for bit operations, should not be used in regular code
 * directly.
 */

/**
 * generic___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

static __always_inline void
generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}

/**
 * generic___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}

/**
 * generic___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

/**
 * generic___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/* WARNING: non atomic and it can be reordered! */
static __always_inline bool
generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * generic_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	/*
	 * Unlike the bitops with the '__' prefix above, this one *is* atomic,
	 * so `volatile` must always stay here with no cast-aways. See
	 * `Documentation/atomic_bitops.txt` for the details.
	 */
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

/*
 * const_*() definitions provide good compile-time optimizations when
 * the passed arguments can be resolved at compile time.
 */
#define const___set_bit			generic___set_bit
#define const___clear_bit		generic___clear_bit
#define const___change_bit		generic___change_bit
#define const___test_and_set_bit	generic___test_and_set_bit
#define const___test_and_clear_bit	generic___test_and_clear_bit
#define const___test_and_change_bit	generic___test_and_change_bit

/**
 * const_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 *
 * A version of generic_test_bit() which discards the `volatile` qualifier to
 * allow a compiler to optimize code harder. Non-atomic and to be called only
 * for testing compile-time constants, e.g. by the corresponding macros, not
 * directly from "regular" code.
 */
static __always_inline bool
const_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);
	unsigned long val = *p;

	return !!(val & mask);
}

#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
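For completeness, here is how the generic_/const_ pairs above get used (paraphrased from this series' <linux/bitops.h> changes, which are not part of this truncated diff; treat the exact condition as an approximation): the public wrappers route to the const_* variants only when the bit number, the pointer, and the pointed-to word are all compile-time constants, and fall back to the instrumented/arch path otherwise.

#define bitop(op, nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const##op(nr, addr) : op(nr, addr))

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)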
(Some files were not shown because too many files have changed in this diff.)