Blackfin arch: SMP supporting patchset: Blackfin header files and machine common code
The Blackfin dual-core BF561 processor can support SMP-like features. See https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

This patch extends the Blackfin header files and the machine common code to support SMP.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
@@ -15,11 +15,80 @@
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)	((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)

static __inline__ void atomic_add(int i, atomic_t * v)
#ifdef CONFIG_SMP

#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)

asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);

asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);

static inline void atomic_add(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, i);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, -i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, i);
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, -i);
}

static inline void atomic_inc(volatile atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, 1);
}

static inline void atomic_dec(volatile atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, -1);
}

static inline void atomic_clear_mask(int mask, atomic_t *v)
{
	__raw_atomic_clear_asm(&v->counter, mask);
}

static inline void atomic_set_mask(int mask, atomic_t *v)
{
	__raw_atomic_set_asm(&v->counter, mask);
}

static inline int atomic_test_mask(int mask, atomic_t *v)
{
	return __raw_atomic_test_asm(&v->counter, mask);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#else /* !CONFIG_SMP */

#define atomic_read(v)	((v)->counter)

static inline void atomic_add(int i, atomic_t *v)
{
	long flags;

@@ -28,7 +97,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t * v)
static inline void atomic_sub(int i, atomic_t *v)
{
	long flags;

@@ -38,7 +107,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)

}

static inline int atomic_add_return(int i, atomic_t * v)
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;
@@ -52,8 +121,7 @@ static inline int atomic_add_return(int i, atomic_t * v)
	return __temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static inline int atomic_sub_return(int i, atomic_t * v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
	long flags;
@@ -66,7 +134,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
	return __temp;
}

static __inline__ void atomic_inc(volatile atomic_t * v)
static inline void atomic_inc(volatile atomic_t *v)
{
	long flags;

@@ -75,20 +143,7 @@ static __inline__ void atomic_inc(volatile atomic_t * v)
	local_irq_restore(flags);
}

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

static __inline__ void atomic_dec(volatile atomic_t * v)
static inline void atomic_dec(volatile atomic_t *v)
{
	long flags;

@@ -97,7 +152,7 @@ static __inline__ void atomic_dec(volatile atomic_t * v)
	local_irq_restore(flags);
}

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	long flags;

@@ -106,7 +161,7 @@ static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	long flags;

@@ -121,9 +176,25 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !CONFIG_SMP */

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
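
For context, a minimal usage sketch of the resulting API (not part of the patch; the counter name is hypothetical). On SMP builds these calls reach the __raw_atomic_*_asm helpers; on UP they fall back to the irq-save/restore paths above.

	#include <asm/atomic.h>

	static atomic_t my_refcount = ATOMIC_INIT(1);	/* hypothetical counter */

	static void get_ref(void)
	{
		/* SMP: __raw_atomic_update_asm(&counter, 1); UP: irq-protected add */
		atomic_inc(&my_refcount);
	}

	static int put_ref(void)
	{
		/* atomic_dec_return(v) expands to atomic_sub_return(1, (v)) */
		return atomic_dec_return(&my_refcount) == 0;
	}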
@@ -47,6 +47,9 @@
# define DMA_UNCACHED_REGION (0)
#endif

extern void bfin_setup_caches(unsigned int cpu);
extern void bfin_setup_cpudata(unsigned int cpu);

extern unsigned long get_cclk(void);
extern unsigned long get_sclk(void);
extern unsigned long sclk_to_usecs(unsigned long sclk);
@@ -58,8 +61,6 @@ extern void dump_bfin_trace_buffer(void);

/* init functions only */
extern int init_arch_irq(void);
extern void bfin_icache_init(void);
extern void bfin_dcache_init(void);
extern void init_exception_vectors(void);
extern void program_IAR(void);
@@ -7,7 +7,6 @@

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>	/* save_flags */

#ifdef __KERNEL__

@@ -20,12 +19,75 @@
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
#ifdef CONFIG_SMP

#include <linux/linkage.h>

asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_set_asm(a, nr & 0x1f);
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_clear_asm(a, nr & 0x1f);
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_toggle_asm(a, nr & 0x1f);
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	volatile const unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_set_asm(a, nr & 0x1f);
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_clear_asm(a, nr & 0x1f);
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
}

#else /* !CONFIG_SMP */

#include <asm/system.h>	/* save_flags */

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
@@ -33,23 +95,7 @@ static __inline__ void set_bit(int nr, volatile unsigned long *addr)
	local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
@@ -61,17 +107,7 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int mask, flags;
	unsigned long *ADDR = (unsigned long *)addr;
@@ -83,17 +119,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static __inline__ int test_and_set_bit(int nr, void *addr)
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -109,19 +135,7 @@ static __inline__ int test_and_set_bit(int nr, void *addr)
	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -137,19 +151,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -164,7 +166,69 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
	return retval;
}

static __inline__ int __test_and_change_bit(int nr,
#endif /* CONFIG_SMP */

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static inline int __test_and_change_bit(int nr,
					volatile unsigned long *addr)
{
	int mask, retval;
@@ -177,16 +241,7 @@ static __inline__ int __test_and_change_bit(int nr,
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const void *addr)
static inline int __test_bit(int nr, const void *addr)
{
	int *a = (int *)addr;
	int mask;
@@ -196,10 +251,16 @@ static __inline__ int __test_bit(int nr, const void *addr)
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
#ifndef CONFIG_SMP
/*
 * This routine doesn't need irq save and restore ops in UP
 * context.
 */
static inline int test_bit(int nr, const void *addr)
{
	return __test_bit(nr, addr);
}
#endif

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
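
A minimal usage sketch of the atomic bitops (not part of the patch; the flags word and bit name are hypothetical):

	#include <asm/bitops.h>

	static unsigned long dev_flags;	/* hypothetical device-state word */
	#define DEV_FLAG_BUSY	0

	static int try_claim_device(void)
	{
		/* SMP: __raw_bit_test_set_asm(); UP: irq-save/restore fallback.
		 * Returns the previous bit value. */
		if (test_and_set_bit(DEV_FLAG_BUSY, &dev_flags))
			return -1;	/* already busy */
		return 0;
	}

	static void release_device(void)
	{
		smp_mb__before_clear_bit();
		clear_bit(DEV_FLAG_BUSY, &dev_flags);
	}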
@@ -12,6 +12,11 @@
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES	L1_CACHE_BYTES

#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
#define ____cacheline_aligned

/*
 * Put cacheline-aligned data into L1 data memory
 */
@@ -21,9 +26,33 @@
	__section__(".data_l1.cacheline_aligned")))
#endif

#endif

/*
 * largest L1 which this arch supports
 */
#define L1_CACHE_SHIFT_MAX	5

#if defined(CONFIG_SMP) && \
    !defined(CONFIG_BFIN_CACHE_COHERENT) && \
    defined(CONFIG_BFIN_DCACHE)
#define __ARCH_SYNC_CORE_DCACHE
#ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void);

static inline void smp_mark_barrier(void)
{
	__raw_smp_mark_barrier_asm();
}
static inline void smp_check_barrier(void)
{
	__raw_smp_check_barrier_asm();
}

void resync_core_dcache(void);
#endif
#endif

#endif
@@ -35,6 +35,7 @@ extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page);
extern void blackfin_invalidate_entire_dcache(void);

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -44,12 +45,20 @@ extern void blackfin_dflush_page(void *page);
#define flush_cache_vmap(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)	do { } while (0)

#ifdef CONFIG_SMP
#define flush_icache_range_others(start, end)	\
	smp_icache_flush_range_others((start), (end))
#else
#define flush_icache_range_others(start, end)	do { } while (0)
#endif

static inline void flush_icache_range(unsigned start, unsigned end)
{
#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE)

# if defined(CONFIG_BFIN_WT)
	blackfin_icache_flush_range((start), (end));
	flush_icache_range_others(start, end);
# else
	blackfin_icache_dcache_flush_range((start), (end));
# endif
@@ -58,6 +67,7 @@ static inline void flush_icache_range(unsigned start, unsigned end)

# if defined(CONFIG_BFIN_ICACHE)
	blackfin_icache_flush_range((start), (end));
	flush_icache_range_others(start, end);
# endif
# if defined(CONFIG_BFIN_DCACHE)
	blackfin_dcache_flush_range((start), (end));
@@ -66,10 +76,12 @@ static inline void flush_icache_range(unsigned start, unsigned end)
#endif
}

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_range ((unsigned) (dst), (unsigned) (dst) + (len)); \
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
     flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	memcpy(dst, src, len)

#if defined(CONFIG_BFIN_DCACHE)
@@ -82,7 +94,7 @@ do { memcpy(dst, src, len); \
# define flush_dcache_page(page)	blackfin_dflush_page(page_address(page))
#else
# define flush_dcache_range(start, end)	do { } while (0)
# define flush_dcache_page(page)	do { } while (0)
#endif

extern unsigned long reserved_mem_dcache_on;
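
Why flush_icache_range_others() matters: when one core rewrites instructions (e.g. a debugger planting a breakpoint), the other core's icache may still hold the stale bytes. A hedged sketch with hypothetical names:

	#include <linux/string.h>
	#include <asm/cacheflush.h>

	/* Hypothetical helper: patch an instruction and make it visible
	 * to every core's icache, not just the local one. */
	static void patch_insn(void *insn, const void *new, unsigned len)
	{
		memcpy(insn, new, len);
		/* flushes the local icache and, on SMP, fans out via
		 * flush_icache_range_others() -> smp_icache_flush_range_others() */
		flush_icache_range((unsigned)insn, (unsigned)insn + len);
	}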
@@ -303,9 +303,14 @@
	RETI = [sp++];
	RETS = [sp++];

#ifdef CONFIG_SMP
	GET_PDA(p0, r0);
	r0 = [p0 + PDA_IRQFLAGS];
#else
	p0.h = _irq_flags;
	p0.l = _irq_flags;
	r0 = [p0];
#endif
	sti r0;

	sp += 4;	/* Skip Reserved */

@@ -352,4 +357,3 @@
	SYSCFG = [sp++];
	csync;
.endm
@@ -0,0 +1,42 @@
/*
 * File:         arch/blackfin/include/asm/cpu.h
 * Author:       Philippe Gerum <rpm@xenomai.org>
 *
 * Copyright 2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef __ASM_BLACKFIN_CPU_H
#define __ASM_BLACKFIN_CPU_H

#include <linux/percpu.h>

struct task_struct;

struct blackfin_cpudata {
	struct cpu cpu;
	struct task_struct *idle;
	unsigned long cclk;
	unsigned int imemctl;
	unsigned int dmemctl;
	unsigned long loops_per_jiffy;
	unsigned long dcache_invld_count;
};

DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);

#endif
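
The per-CPU data declared here is reached through the generic per-CPU accessors; a small sketch (the helper name is ours, not the patch's):

	#include <asm/cpu.h>

	/* Hypothetical helper: report a core's clock from its per-CPU data. */
	static unsigned long cpu_clock_hz(unsigned int cpu)
	{
		return per_cpu(cpu_data, cpu).cclk;
	}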
@@ -24,7 +24,8 @@ struct l1_scratch_task_info
};

/* A pointer to the structure in memory.  */
#define L1_SCRATCH_TASK_INFO	((struct l1_scratch_task_info *)L1_SCRATCH_START)
#define L1_SCRATCH_TASK_INFO	((struct l1_scratch_task_info *)\
						get_l1_scratch_start())

#endif
@@ -0,0 +1,112 @@
/*
 * include/asm-generic/mutex-dec.h
 *
 * Generic implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 */
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value of lower than 1, then the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
 * to return 0 otherwise.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}

#endif
@@ -6,4 +6,67 @@
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex-dec.h>
#else

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()	1

static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}

#endif

#endif
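
These fastpath hooks are consumed by the generic mutex code; roughly as in kernel/mutex.c of this era (a sketch only — the slowpath symbols are static in that file and are named here just to show the shape):

	#include <linux/mutex.h>

	/* Sketch of the caller side in kernel/mutex.c (not part of this patch). */
	void mutex_lock_sketch(struct mutex *lock)
	{
		/* count 1 -> 0 takes the lock; otherwise enter the slowpath */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}

	void mutex_unlock_sketch(struct mutex *lock)
	{
		/* count 0 -> 1 releases; <= 0 afterwards means waiters to wake */
		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
	}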
@@ -0,0 +1,70 @@
/*
 * File:         arch/blackfin/include/asm/pda.h
 * Author:       Philippe Gerum <rpm@xenomai.org>
 *
 * Copyright 2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _ASM_BLACKFIN_PDA_H
#define _ASM_BLACKFIN_PDA_H

#include <asm/mem_map.h>

#ifndef __ASSEMBLY__

struct blackfin_pda {			/* Per-processor Data Area */
	struct blackfin_pda *next;

	unsigned long syscfg;
#ifdef CONFIG_SMP
	unsigned long imask;		/* Current IMASK value */
#endif

	unsigned long *ipdt;		/* Start of switchable I-CPLB table */
	unsigned long *ipdt_swapcount;	/* Number of swaps in ipdt */
	unsigned long *dpdt;		/* Start of switchable D-CPLB table */
	unsigned long *dpdt_swapcount;	/* Number of swaps in dpdt */

	/*
	 * Single instructions can have multiple faults, which
	 * need to be handled by traps.c, in irq5. We store
	 * the exception cause to ensure we don't miss a
	 * double fault condition
	 */
	unsigned long ex_iptr;
	unsigned long ex_optr;
	unsigned long ex_buf[4];
	unsigned long ex_imask;		/* Saved imask from exception */
	unsigned long *ex_stack;	/* Exception stack space */

#ifdef ANOMALY_05000261
	unsigned long last_cplb_fault_retx;
#endif
	unsigned long dcplb_fault_addr;
	unsigned long icplb_fault_addr;
	unsigned long retx;
	unsigned long seqstat;
};

extern struct blackfin_pda cpu_pda[];

void reserve_pda(void);

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_BLACKFIN_PDA_H */
@@ -3,4 +3,14 @@

#include <asm-generic/percpu.h>

#endif	/* __ARCH_BLACKFIN_PERCPU__ */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

#define PERCPU_ENOUGH_ROOM \
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
	 PERCPU_MODULE_RESERVE)

#endif	/* __ARCH_BLACKFIN_PERCPU__ */
@@ -106,7 +106,8 @@ unsigned long get_wchan(struct task_struct *p);
	eip; })
#define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)

#define cpu_relax()	barrier()
#define cpu_relax()	smp_mb()

/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
@@ -137,7 +138,11 @@ static inline uint32_t __pure bfin_revid(void)
static inline uint16_t __pure bfin_cpuid(void)
{
	return (bfin_read_CHIPID() & CHIPID_FAMILY) >> 12;
}

static inline uint32_t __pure bfin_dspid(void)
{
	return bfin_read_DSPID();
}

static inline uint32_t __pure bfin_compiled_revid(void)
@@ -0,0 +1,6 @@
#ifndef _ASM_BLACKFIN_RWLOCK_H
#define _ASM_BLACKFIN_RWLOCK_H

#define RW_LOCK_BIAS 0x01000000

#endif
@@ -0,0 +1,42 @@
/*
 * File:         arch/blackfin/include/asm/smp.h
 * Author:       Philippe Gerum <rpm@xenomai.org>
 *
 * Copyright 2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef __ASM_BLACKFIN_SMP_H
#define __ASM_BLACKFIN_SMP_H

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <asm/blackfin.h>
#include <mach/smp.h>

#define raw_smp_processor_id()	blackfin_core_id()

struct corelock_slot {
	int lock;
};

void smp_icache_flush_range_others(unsigned long start,
				   unsigned long end);

#endif /* !__ASM_BLACKFIN_SMP_H */
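
raw_smp_processor_id() resolves to blackfin_core_id(), the low byte of DSPID (see the system.h hunk below), and can index per-core state such as cpu_pda. A sketch assuming a CONFIG_SMP build:

	#include <asm/smp.h>
	#include <asm/pda.h>

	/* Hypothetical helper: read this core's saved IMASK from its PDA. */
	static unsigned long this_core_saved_imask(void)
	{
		return cpu_pda[raw_smp_processor_id()].imask;
	}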
@@ -1,6 +1,89 @@
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H

#error blackfin architecture does not support SMP spin lock yet
#include <asm/atomic.h>

#endif
asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
asmlinkage void __raw_read_lock_asm(volatile int *ptr);
asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __raw_spin_is_locked_asm(&lock->lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock_asm(&lock->lock);
}

#define __raw_spin_lock_flags(lock, flags)	__raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_asm(&lock->lock);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock_asm(&lock->lock);
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

static inline int __raw_read_can_lock(raw_rwlock_t *rw)
{
	return __raw_uncached_fetch_asm(&rw->lock) > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *rw)
{
	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_read_lock_asm(&rw->lock);
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	return __raw_read_trylock_asm(&rw->lock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_read_unlock_asm(&rw->lock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__raw_write_lock_asm(&rw->lock);
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	return __raw_write_trylock_asm(&rw->lock);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__raw_write_unlock_asm(&rw->lock);
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !__BFIN_SPINLOCK_H */
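
Kernel code never calls these __raw_* entry points directly; the generic spinlock API lands on them. A minimal usage sketch with hypothetical names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
	static int shared_state;

	static void update_shared_state(int val)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);	/* -> __raw_spin_lock_asm */
		shared_state = val;
		spin_unlock_irqrestore(&example_lock, flags);	/* -> __raw_spin_unlock_asm */
	}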
@@ -0,0 +1,22 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

#include <asm/rwlock.h>

typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }

typedef struct {
	volatile unsigned int lock;
} raw_rwlock_t;

#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }

#endif
@@ -37,20 +37,16 @@
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <mach/anomaly.h>
#include <asm/pda.h>
#include <asm/processor.h>

/* Forward decl needed due to cdef inter dependencies */
static inline uint32_t __pure bfin_dspid(void);
#define blackfin_core_id() (bfin_dspid() & 0xff)

/*
 * Interrupt configuring macros.
 */

extern unsigned long irq_flags;

#define local_irq_enable() \
	__asm__ __volatile__( \
		"sti %0;" \
		: \
		: "d" (irq_flags) \
	)

#define local_irq_disable() \
	do { \
		int __tmp_dummy; \
@@ -66,6 +62,18 @@ extern unsigned long irq_flags;
# define NOP_PAD_ANOMALY_05000244
#endif

#ifdef CONFIG_SMP
# define irq_flags cpu_pda[blackfin_core_id()].imask
#else
extern unsigned long irq_flags;
#endif

#define local_irq_enable() \
	__asm__ __volatile__( \
		"sti %0;" \
		: \
		: "d" (irq_flags) \
	)
#define idle_with_irq_disabled() \
	__asm__ __volatile__( \
		NOP_PAD_ANOMALY_05000244 \
@@ -129,22 +137,85 @@ extern unsigned long irq_flags;
#define rmb() asm volatile ("" : : :"memory")
#define wmb() asm volatile ("" : : :"memory")
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)

#define read_barrier_depends() do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value);
asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value);
asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr,
					unsigned long new, unsigned long old);
asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr,
					unsigned long new, unsigned long old);
asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
					unsigned long new, unsigned long old);

#ifdef __ARCH_SYNC_CORE_DCACHE
# define smp_mb()	do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
# define smp_rmb()	do { barrier(); smp_check_barrier(); } while (0)
# define smp_wmb()	do { barrier(); smp_mark_barrier(); } while (0)
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp;

	switch (size) {
	case 1:
		tmp = __raw_xchg_1_asm(ptr, x);
		break;
	case 2:
		tmp = __raw_xchg_2_asm(ptr, x);
		break;
	case 4:
		tmp = __raw_xchg_4_asm(ptr, x);
		break;
	}

	return tmp;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long tmp;

	switch (size) {
	case 1:
		tmp = __raw_cmpxchg_1_asm(ptr, new, old);
		break;
	case 2:
		tmp = __raw_cmpxchg_2_asm(ptr, new, old);
		break;
	case 4:
		tmp = __raw_cmpxchg_4_asm(ptr, new, old);
		break;
	}

	return tmp;
}
#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
		(unsigned long)(n), sizeof(*(ptr))))

#define smp_read_barrier_depends()	smp_check_barrier()

#else /* !CONFIG_SMP */

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
@@ -194,9 +265,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
		(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif /* !CONFIG_SMP */

#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define tas(ptr) ((void)xchg((ptr), 1))

#define prepare_to_switch() do { } while(0)

@@ -218,4 +292,4 @@ do { \
	(last) = resume (prev, next); \
} while (0)

#endif /* _BLACKFIN_SYSTEM_H */
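
A short sketch of cmpxchg() in use (our helper, not the patch's): a lock-free conditional increment that retries until the compare-and-swap wins; on SMP a 4-byte object routes to __raw_cmpxchg_4_asm().

	/* Hypothetical lock-free helper built on the cmpxchg() above. */
	static int bump_if_positive(volatile int *p)
	{
		int old;

		do {
			old = *p;
			if (old <= 0)
				return 0;	/* nothing to do */
		} while (cmpxchg(p, old, old + 1) != old);

		return 1;
	}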
@@ -10,3 +10,4 @@ obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
obj-$(CONFIG_PM)       += pm.o dpmc_modes.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
obj-$(CONFIG_SMP) += smp.o
@@ -97,3 +97,39 @@ ENTRY(_blackfin_dflush_page)
	P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT);
	jump .Ldfr;
ENDPROC(_blackfin_dflush_page)

/* Invalidate the Entire Data cache by
 * clearing DMC[1:0] bits
 */
ENTRY(_blackfin_invalidate_entire_dcache)
	[--SP] = ( R7:5);

	P0.L = LO(DMEM_CONTROL);
	P0.H = HI(DMEM_CONTROL);
	R7 = [P0];
	R5 = R7;	/* Save DMEM_CNTR */

	/* Clear the DMC[1:0] bits, All valid bits in the data
	 * cache are set to the invalid state
	 */
	BITCLR(R7,DMC0_P);
	BITCLR(R7,DMC1_P);
	CLI R6;
	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
	.align 8;
	[P0] = R7;
	SSYNC;
	STI R6;

	/* Configures the data cache again */

	CLI R6;
	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
	.align 8;
	[P0] = R5;
	SSYNC;
	STI R6;

	( R7:5) = [SP++];
	RTS;
ENDPROC(_blackfin_invalidate_entire_dcache)