Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300

* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300:
  Separate out the proc- and unit-specific header directories from the general
  Move arch headers from include/asm-mn10300/ to arch/mn10300/include/asm/.
This commit is contained in:
Linus Torvalds
2009-04-10 10:01:49 -07:00
138 changed files with 31 additions and 65 deletions
+3 -37
View File
@@ -94,42 +94,8 @@ ifdef CONFIG_DEBUG_INFO
KBUILD_AFLAGS += -Wa,--gdwarf2
endif
###################################################################################################
#
# juggle some symlinks in the MN10300 asm include dir
# include the appropriate processor- and unit-specific headers
#
# Update machine proc and unit symlinks if something which affects
# them changed. We use .proc / .unit to indicate when they were
# updated last, otherwise make uses the target directory mtime.
#
###################################################################################################
# processor specific definitions
include/asm-mn10300/.proc: $(wildcard include/config/proc/*.h) include/config/auto.conf
@echo ' SYMLINK include/asm-mn10300/proc -> include/asm-mn10300/proc-$(PROCESSOR)'
ifneq ($(KBUILD_SRC),)
$(Q)mkdir -p include/asm-mn10300
$(Q)ln -fsn $(srctree)/include/asm-mn10300/proc-$(PROCESSOR) include/asm-mn10300/proc
else
$(Q)ln -fsn proc-$(PROCESSOR) include/asm-mn10300/proc
endif
@touch $@
CLEAN_FILES += include/asm-mn10300/proc include/asm-mn10300/.proc
prepare: include/asm-mn10300/.proc
# unit specific definitions
include/asm-mn10300/.unit: $(wildcard include/config/unit/*.h) include/config/auto.conf
@echo ' SYMLINK include/asm-mn10300/unit -> include/asm-mn10300/unit-$(UNIT)'
ifneq ($(KBUILD_SRC),)
$(Q)mkdir -p include/asm-mn10300
$(Q)ln -fsn $(srctree)/include/asm-mn10300/unit-$(UNIT) include/asm-mn10300/unit
else
$(Q)ln -fsn unit-$(UNIT) include/asm-mn10300/unit
endif
@touch $@
CLEAN_FILES += include/asm-mn10300/unit include/asm-mn10300/.unit
prepare: include/asm-mn10300/.unit
KBUILD_CPPFLAGS += -I$(srctree)/arch/mn10300/proc-$(PROCESSOR)/include
KBUILD_CPPFLAGS += -I$(srctree)/arch/mn10300/unit-$(UNIT)/include
+1
View File
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
+157
View File
@@ -0,0 +1,157 @@
/* MN10300 Atomic counter operations
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#ifdef CONFIG_SMP
#error not SMP safe
#endif
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
#include <asm/system.h>
/**
* atomic_add_return - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns the result
* Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long irq_state;
	int result;

	/* UP-only atomicity: briefly mask interrupts around the
	 * read-modify-write of the counter (no SMP here, see #error above) */
	local_irq_save(irq_state);
	result = v->counter + i;
	v->counter = result;
	local_irq_restore(irq_state);

	return result;
}
/**
* atomic_sub_return - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns the result
* Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long irq_state;
	int result;

	/* UP-only atomicity: briefly mask interrupts around the
	 * read-modify-write of the counter (no SMP here, see #error above) */
	local_irq_save(irq_state);
	result = v->counter - i;
	v->counter = result;
	local_irq_restore(irq_state);

	return result;
}
/* atomic_add_negative - add @i to @v and report whether the result is < 0 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
/* atomic_add - add @i to @v, discarding the result */
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
}
/* atomic_sub - subtract @i from @v, discarding the result */
static inline void atomic_sub(int i, atomic_t *v)
{
atomic_sub_return(i, v);
}
/* atomic_inc - increment @v by one */
static inline void atomic_inc(atomic_t *v)
{
atomic_add_return(1, v);
}
/* atomic_dec - decrement @v by one */
static inline void atomic_dec(atomic_t *v)
{
atomic_sub_return(1, v);
}
/* Derived operations, all built on the IRQ-masking add/sub primitives
 * above; the *_test forms report whether the new value is zero. */
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
/* atomic_add_unless - add @a to @v unless @v currently equals @u.
 * Implemented as a cmpxchg retry loop; evaluates to true (non-zero) if
 * the addition was performed, i.e. the value seen was never @u. */
#define atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
c = old; \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* atomic_clear_mask - atomically clear the bits selected by @mask in *@addr */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long irq_state;
	const unsigned long keep_bits = ~mask;

	/* UP-only atomicity via interrupt masking */
	local_irq_save(irq_state);
	*addr &= keep_bits;
	local_irq_restore(irq_state);
}
/* exchange / compare-and-exchange on the counter word */
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
/* NOTE(review): the original comment questioned whether atomic ops are
 * already serialising on MN10300; since this header is !SMP-only (see the
 * #error above), plain compiler barriers appear sufficient here. */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* __KERNEL__ */
#endif /* _ASM_ATOMIC_H */
+4
View File
@@ -0,0 +1,4 @@
#ifndef _ASM_AUXVEC_H
#define _ASM_AUXVEC_H
#endif
+240
View File
@@ -0,0 +1,240 @@
/* MN10300 bit operations
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#ifndef __ASM_BITOPS_H
#define __ASM_BITOPS_H
#include <asm/cpu-regs.h>
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/*
* set bit
*/
#define __set_bit(nr, addr) \
({ \
volatile unsigned char *_a = (unsigned char *)(addr); \
const unsigned shift = (nr) & 7; \
_a += (nr) >> 3; \
\
asm volatile("bset %2,(%1) # set_bit reg" \
: "=m"(*_a) \
: "a"(_a), "d"(1 << shift), "m"(*_a) \
: "memory", "cc"); \
})
#define set_bit(nr, addr) __set_bit((nr), (addr))
/*
* clear bit
*/
#define ___clear_bit(nr, addr) \
({ \
volatile unsigned char *_a = (unsigned char *)(addr); \
const unsigned shift = (nr) & 7; \
_a += (nr) >> 3; \
\
asm volatile("bclr %2,(%1) # clear_bit reg" \
: "=m"(*_a) \
: "a"(_a), "d"(1 << shift), "m"(*_a) \
: "memory", "cc"); \
})
#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
/* __clear_bit - non-atomic form: plain read-modify-write on the
 * 32-bit word containing bit @nr of the bitmap at @addr */
static inline void __clear_bit(int nr, volatile void *addr)
{
	unsigned int *word = (unsigned int *) addr;
	unsigned int bit = 1U << (nr & 0x1f);

	word += nr >> 5;
	*word &= ~bit;
}
/*
* test bit
*/
/* test_bit - report (0 or 1) the state of bit @nr in the bitmap at @addr */
static inline int test_bit(int nr, const volatile void *addr)
{
	const unsigned int *word = (const unsigned int *) addr;

	return (word[nr >> 5] >> (nr & 31)) & 1;
}
/*
* change bit
*/
/* __change_bit - non-atomic form: toggle bit @nr in the bitmap at @addr */
static inline void __change_bit(int nr, volatile void *addr)
{
	unsigned int *word = (unsigned int *) addr;

	word[nr >> 5] ^= 1U << (nr & 0x1f);
}
extern void change_bit(int nr, volatile void *addr);
/*
* test and set bit
*/
#define __test_and_set_bit(nr,addr) \
({ \
volatile unsigned char *_a = (unsigned char *)(addr); \
const unsigned shift = (nr) & 7; \
unsigned epsw; \
_a += (nr) >> 3; \
\
asm volatile("bset %3,(%2) # test_set_bit reg\n" \
"mov epsw,%1" \
: "=m"(*_a), "=d"(epsw) \
: "a"(_a), "d"(1 << shift), "m"(*_a) \
: "memory", "cc"); \
\
!(epsw & EPSW_FLAG_Z); \
})
#define test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr))
/*
* test and clear bit
*/
#define __test_and_clear_bit(nr, addr) \
({ \
volatile unsigned char *_a = (unsigned char *)(addr); \
const unsigned shift = (nr) & 7; \
unsigned epsw; \
_a += (nr) >> 3; \
\
asm volatile("bclr %3,(%2) # test_clear_bit reg\n" \
"mov epsw,%1" \
: "=m"(*_a), "=d"(epsw) \
: "a"(_a), "d"(1 << shift), "m"(*_a) \
: "memory", "cc"); \
\
!(epsw & EPSW_FLAG_Z); \
})
#define test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr))
/*
* test and change bit
*/
/* __test_and_change_bit - non-atomic form: toggle bit @nr in the bitmap
 * at @addr and return its previous state (0 or 1) */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
	unsigned int *word = (unsigned int *) addr;
	unsigned int bit = 1U << (nr & 0x1f);
	int was_set;

	word += nr >> 5;
	was_set = (*word & bit) != 0;
	*word ^= bit;

	return was_set;
}
extern int test_and_change_bit(int nr, volatile void *addr);
#include <asm-generic/bitops/lock.h>
#ifdef __KERNEL__
/**
 * __ffs - find first bit set
 * @x: the word to search
 *
 * - returns 31..0, the 0-based index of the least significant set bit
 * - if no bits are set in x, the result is undefined
 */
static inline __attribute__((const))
unsigned long __ffs(unsigned long x)
{
int bit;
/* (x & -x) isolates the lowest set bit; bsch then locates it */
asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x));
return bit;
}
/*
 * special slimline version of fls() for calculating ilog2_u32()
 * - returns the 0-based index of the most significant set bit of @n
 * - note: no protection against n == 0
 */
static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
int bit;
/* bsch (bit search) yields the position of the highest set bit */
asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n));
return bit;
}
/**
* fls - find last bit set
* @x: the word to search
*
* This is defined the same way as ffs:
* - return 32..1 to indicate bit 31..0 most significant bit set
* - return 0 to indicate no bits set
*/
static inline __attribute__((const))
int fls(int x)
{
	/* guard the undefined n == 0 case of __ilog2_u32() */
	if (x == 0)
		return 0;

	return __ilog2_u32(x) + 1;
}
/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Returns the 0-based index of the most significant set bit.
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
return __ilog2_u32(word);
}
/**
* ffs - find first bit set
* @x: the word to search
*
* - return 32..1, the 1-based index of the least significant set bit
* - return 0 to indicate no bits set
*/
static inline __attribute__((const))
int ffs(int x)
{
	/* (x & -x) reduces x to just its least significant set bit (or to 0
	 * when x == 0), so fls() of that single-bit value is exactly the
	 * 1-based index of the first set bit, or 0 for no bits set. */
	int lowest_bit = x & -x;

	return fls(lowest_bit);
}
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr) ^ 0x18, (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr) ^ 0x18, (addr))
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix-le.h>
#endif /* __KERNEL__ */
#endif /* __ASM_BITOPS_H */
+35
View File
@@ -0,0 +1,35 @@
/* MN10300 Kernel bug reporting
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_BUG_H
#define _ASM_BUG_H
/*
* Tell the user there is some problem.
*/
#define _debug_bug_trap() \
do { \
asm volatile( \
" syscall 15 \n" \
"0: \n" \
" .section __bug_table,\"a\" \n" \
" .long 0b,%0,%1 \n" \
" .previous \n" \
: \
: "i"(__FILE__), "i"(__LINE__) \
); \
} while (0)
#define BUG() _debug_bug_trap()
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
#endif /* _ASM_BUG_H */
+20
View File
@@ -0,0 +1,20 @@
/* MN10300 Checks for architecture-dependent bugs
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_BUGS_H
#define _ASM_BUGS_H
#include <asm/processor.h>
/* No MN10300-specific erratum checks are performed at boot time. */
static inline void __init check_bugs(void)
{
}
#endif /* _ASM_BUGS_H */
+151
View File
@@ -0,0 +1,151 @@
/* AM33v2 on-board bus controller registers
*
* Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_BUSCTL_REGS_H
#define _ASM_BUSCTL_REGS_H
#include <asm/cpu-regs.h>
#ifdef __KERNEL__
/* bus controller registers */
#define BCCR __SYSREG(0xc0002000, u32) /* bus controller control reg */
#define BCCR_B0AD 0x00000003 /* block 0 (80000000-83ffffff) bus allocation */
#define BCCR_B1AD 0x0000000c /* block 1 (84000000-87ffffff) bus allocation */
#define BCCR_B2AD 0x00000030 /* block 2 (88000000-8bffffff) bus allocation */
#define BCCR_B3AD 0x000000c0 /* block 3 (8c000000-8fffffff) bus allocation */
#define BCCR_B4AD 0x00000300 /* block 4 (90000000-93ffffff) bus allocation */
#define BCCR_B5AD 0x00000c00 /* block 5 (94000000-97ffffff) bus allocation */
#define BCCR_B6AD 0x00003000 /* block 6 (98000000-9bffffff) bus allocation */
#define BCCR_B7AD 0x0000c000 /* block 7 (9c000000-9fffffff) bus allocation */
#define BCCR_BxAD_EXBUS 0x0 /* - direct to system bus controller */
#define BCCR_BxAD_OPEXBUS 0x1 /* - direct to memory bus controller */
#define BCCR_BxAD_OCMBUS 0x2 /* - direct to on chip memory */
#define BCCR_API 0x00070000 /* bus arbitration priority */
#define BCCR_API_DMACICD 0x00000000 /* - DMA > CI > CD */
#define BCCR_API_DMACDCI 0x00010000 /* - DMA > CD > CI */
#define BCCR_API_CICDDMA 0x00020000 /* - CI > CD > DMA */
#define BCCR_API_CDCIDMA 0x00030000 /* - CD > CI > DMA */
#define BCCR_API_ROUNDROBIN 0x00040000 /* - round robin */
#define BCCR_BEPRI_DMACICD 0x00c00000 /* bus error address priority */
#define BCCR_BEPRI_DMACDCI 0x00000000 /* - DMA > CI > CD */
#define BCCR_BEPRI_CICDDMA 0x00400000 /* - DMA > CD > CI */
#define BCCR_BEPRI_CDCIDMA 0x00800000 /* - CI > CD > DMA */
#define BCCR_BEPRI 0x00c00000 /* - CD > CI > DMA */
#define BCCR_TMON 0x03000000 /* timeout value settings */
#define BCCR_TMON_16IOCLK 0x00000000 /* - 16 IOCLK cycles */
#define BCCR_TMON_256IOCLK 0x01000000 /* - 256 IOCLK cycles */
#define BCCR_TMON_4096IOCLK 0x02000000 /* - 4096 IOCLK cycles */
#define BCCR_TMON_65536IOCLK 0x03000000 /* - 65536 IOCLK cycles */
#define BCCR_TMOE 0x10000000 /* timeout detection enable */
#define BCBERR __SYSREG(0xc0002010, u32) /* bus error source reg */
#define BCBERR_BESB 0x0000001f /* erroneous access destination space */
#define BCBERR_BESB_MON 0x00000001 /* - monitor space */
#define BCBERR_BESB_IO 0x00000002 /* - IO bus */
#define BCBERR_BESB_EX 0x00000004 /* - EX bus */
#define BCBERR_BESB_OPEX 0x00000008 /* - OpEX bus */
#define BCBERR_BESB_OCM 0x00000010 /* - on chip memory */
#define BCBERR_BERW 0x00000100 /* type of access */
#define BCBERR_BERW_WRITE 0x00000000 /* - write */
#define BCBERR_BERW_READ 0x00000100 /* - read */
#define BCBERR_BESD 0x00000200 /* error detector */
#define BCBERR_BESD_BCU 0x00000000 /* - BCU detected error */
#define BCBERR_BESD_SLAVE_BUS 0x00000200 /* - slave bus detected error */
#define BCBERR_BEBST 0x00000400 /* type of access */
#define BCBERR_BEBST_SINGLE 0x00000000 /* - single */
#define BCBERR_BEBST_BURST 0x00000400 /* - burst */
#define BCBERR_BEME 0x00000800 /* multiple bus error flag */
#define BCBERR_BEMR 0x00007000 /* master bus that caused the error */
#define BCBERR_BEMR_NOERROR 0x00000000 /* - no error */
#define BCBERR_BEMR_CI 0x00001000 /* - CPU instruction fetch bus caused error */
#define BCBERR_BEMR_CD 0x00002000 /* - CPU data bus caused error */
#define BCBERR_BEMR_DMA 0x00004000 /* - DMA bus caused error */
#define BCBEAR __SYSREGC(0xc0002020, u32) /* bus error address reg */
/* system bus controller registers */
#define SBBASE(X) __SYSREG(0xd8c00100 + (X) * 0x10, u32) /* SBC base addr regs */
#define SBBASE_BE 0x00000001 /* bank enable */
#define SBBASE_BAM 0x0000fffe /* bank address mask [31:17] */
#define SBBASE_BBA 0xfffe0000 /* bank base address [31:17] */
#define SBCNTRL0(X) __SYSREG(0xd8c00200 + (X) * 0x10, u32) /* SBC bank ctrl0 regs */
#define SBCNTRL0_WEH 0x00000f00 /* write enable hold */
#define SBCNTRL0_REH 0x0000f000 /* read enable hold */
#define SBCNTRL0_RWH 0x000f0000 /* SRW signal hold */
#define SBCNTRL0_CSH 0x00f00000 /* chip select hold */
#define SBCNTRL0_DAH 0x0f000000 /* data hold */
#define SBCNTRL0_ADH 0xf0000000 /* address hold */
#define SBCNTRL1(X) __SYSREG(0xd8c00204 + (X) * 0x10, u32) /* SBC bank ctrl1 regs */
#define SBCNTRL1_WED 0x00000f00 /* write enable delay */
#define SBCNTRL1_RED 0x0000f000 /* read enable delay */
#define SBCNTRL1_RWD 0x000f0000 /* SRW signal delay */
#define SBCNTRL1_ASW 0x00f00000 /* address strobe width */
#define SBCNTRL1_CSD 0x0f000000 /* chip select delay */
#define SBCNTRL1_ASD 0xf0000000 /* address strobe delay */
#define SBCNTRL2(X) __SYSREG(0xd8c00208 + (X) * 0x10, u32) /* SBC bank ctrl2 regs */
#define SBCNTRL2_WC 0x000000ff /* wait count */
#define SBCNTRL2_BWC 0x00000f00 /* burst wait count */
#define SBCNTRL2_WM 0x01000000 /* wait mode setting */
#define SBCNTRL2_WM_FIXEDWAIT 0x00000000 /* - fixed wait access */
#define SBCNTRL2_WM_HANDSHAKE 0x01000000 /* - handshake access */
#define SBCNTRL2_BM 0x02000000 /* bus synchronisation mode */
#define SBCNTRL2_BM_SYNC 0x00000000 /* - synchronous mode */
#define SBCNTRL2_BM_ASYNC 0x02000000 /* - asynchronous mode */
#define SBCNTRL2_BW 0x04000000 /* bus width */
#define SBCNTRL2_BW_32 0x00000000 /* - 32 bits */
#define SBCNTRL2_BW_16 0x04000000 /* - 16 bits */
#define SBCNTRL2_RWINV 0x08000000 /* R/W signal invert polarity */
#define SBCNTRL2_RWINV_NORM 0x00000000 /* - normal (read high) */
#define SBCNTRL2_RWINV_INV 0x08000000 /* - inverted (read low) */
#define SBCNTRL2_BT 0x70000000 /* bus type setting */
#define SBCNTRL2_BT_SRAM 0x00000000 /* - SRAM interface */
#define SBCNTRL2_BT_ADMUX 0x00000000 /* - addr/data multiplexed interface */
#define SBCNTRL2_BT_BROM 0x00000000 /* - burst ROM interface */
#define SBCNTRL2_BTSE 0x80000000 /* burst enable */
/* memory bus controller */
#define SDBASE(X) __SYSREG(0xda000008 + (X) * 0x4, u32) /* MBC base addr regs */
#define SDBASE_CE 0x00000001 /* chip enable */
#define SDBASE_CBAM 0x0000fff0 /* chip base address mask [31:20] */
#define SDBASE_CBAM_SHIFT 16
#define SDBASE_CBA 0xfff00000 /* chip base address [31:20] */
#define SDRAMBUS __SYSREG(0xda000000, u32) /* bus mode control reg */
#define SDRAMBUS_REFEN 0x00000004 /* refresh enable */
#define SDRAMBUS_TRC 0x00000018 /* refresh command delay time */
#define SDRAMBUS_BSTPT 0x00000020 /* burst stop command enable */
#define SDRAMBUS_PONSEQ 0x00000040 /* power on sequence */
#define SDRAMBUS_SELFREQ 0x00000080 /* self-refresh mode request */
#define SDRAMBUS_SELFON 0x00000100 /* self-refresh mode on */
#define SDRAMBUS_SIZE 0x00030000 /* SDRAM size */
#define SDRAMBUS_SIZE_64Mbit 0x00010000 /* 64Mbit SDRAM (x16) */
#define SDRAMBUS_SIZE_128Mbit 0x00020000 /* 128Mbit SDRAM (x16) */
#define SDRAMBUS_SIZE_256Mbit 0x00030000 /* 256Mbit SDRAM (x16) */
#define SDRAMBUS_TRASWAIT 0x000c0000 /* row address precharge command cycle number */
#define SDRAMBUS_REFNUM 0x00300000 /* refresh command number */
#define SDRAMBUS_BSTWAIT 0x00c00000 /* burst stop command cycle */
#define SDRAMBUS_SETWAIT 0x03000000 /* mode register setting command cycle */
#define SDRAMBUS_PREWAIT 0x0c000000 /* precharge command cycle */
#define SDRAMBUS_RASLATE 0x30000000 /* RAS latency */
#define SDRAMBUS_CASLATE 0xc0000000 /* CAS latency */
#define SDREFCNT __SYSREG(0xda000004, u32) /* refresh period reg */
#define SDREFCNT_PERI 0x00000fff /* refresh period */
#define SDSHDW __SYSREG(0xda000010, u32) /* test reg */
#endif /* __KERNEL__ */
#endif /* _ASM_BUSCTL_REGS_H */
+6
View File
@@ -0,0 +1,6 @@
#ifndef _ASM_BYTEORDER_H
#define _ASM_BYTEORDER_H
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_BYTEORDER_H */
+54
View File
@@ -0,0 +1,54 @@
/* MN10300 cache management registers
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
#include <asm/cpu-regs.h>
#include <proc/cache.h>
#ifndef __ASSEMBLY__
#define L1_CACHE_DISPARITY (L1_CACHE_NENTRIES * L1_CACHE_BYTES)
#else
#define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif
/* data cache purge registers
* - read from the register to unconditionally purge that cache line
* - write address & 0xffffff00 to conditionally purge that cache line
* - clear LSB to request invalidation as well
*/
#define DCACHE_PURGE(WAY, ENTRY) \
__SYSREG(0xc8400000 + (WAY) * L1_CACHE_WAYDISP + \
(ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY0(ENTRY) \
__SYSREG(0xc8400000 + 0 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY1(ENTRY) \
__SYSREG(0xc8400000 + 1 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY2(ENTRY) \
__SYSREG(0xc8400000 + 2 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY3(ENTRY) \
__SYSREG(0xc8400000 + 3 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
/* instruction cache access registers */
#define ICACHE_DATA(WAY, ENTRY, OFF) \
__SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
#define ICACHE_TAG(WAY, ENTRY) \
__SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
/* instruction cache access registers */
#define DCACHE_DATA(WAY, ENTRY, OFF) \
__SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
#define DCACHE_TAG(WAY, ENTRY) \
__SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
#endif /* _ASM_CACHE_H */
+116
View File
@@ -0,0 +1,116 @@
/* MN10300 Cache flushing
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
#ifndef __ASSEMBLY__
/* Keep includes the same across arches. */
#include <linux/mm.h>
/*
* virtually-indexed cache management (our cache is physically indexed)
*/
#define flush_cache_all() do {} while (0)
#define flush_cache_mm(mm) do {} while (0)
#define flush_cache_dup_mm(mm) do {} while (0)
#define flush_cache_range(mm, start, end) do {} while (0)
#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
#define flush_cache_vmap(start, end) do {} while (0)
#define flush_cache_vunmap(start, end) do {} while (0)
#define flush_dcache_page(page) do {} while (0)
#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
/*
* physically-indexed cache management
*/
#ifndef CONFIG_MN10300_CACHE_DISABLED
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
#else
#define flush_icache_range(start, end) do {} while (0)
#define flush_icache_page(vma, pg) do {} while (0)
#endif
#define flush_icache_user_range(vma, pg, adr, len) \
flush_icache_range(adr, adr + len)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_page(vma, page); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
/*
* primitive routines
*/
#ifndef CONFIG_MN10300_CACHE_DISABLED
extern void mn10300_icache_inv(void);
extern void mn10300_dcache_inv(void);
extern void mn10300_dcache_inv_page(unsigned start);
extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
#ifdef CONFIG_MN10300_CACHE_WBACK
extern void mn10300_dcache_flush(void);
extern void mn10300_dcache_flush_page(unsigned start);
extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
extern void mn10300_dcache_flush_inv(void);
extern void mn10300_dcache_flush_inv_page(unsigned start);
extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#else
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
#define mn10300_dcache_flush_inv() mn10300_dcache_inv()
#define mn10300_dcache_flush_inv_page(start) \
mn10300_dcache_inv_page((start))
#define mn10300_dcache_flush_inv_range(start, end) \
mn10300_dcache_inv_range((start), (end))
#define mn10300_dcache_flush_inv_range2(start, size) \
mn10300_dcache_inv_range2((start), (size))
#endif /* CONFIG_MN10300_CACHE_WBACK */
#else
#define mn10300_icache_inv() do {} while (0)
#define mn10300_dcache_inv() do {} while (0)
#define mn10300_dcache_inv_page(start) do {} while (0)
#define mn10300_dcache_inv_range(start, end) do {} while (0)
#define mn10300_dcache_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_inv_page(start) do {} while (0)
#define mn10300_dcache_flush_inv() do {} while (0)
#define mn10300_dcache_flush_inv_range(start, end) do {} while (0)
#define mn10300_dcache_flush_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
#endif /* CONFIG_MN10300_CACHE_DISABLED */
/*
* internal debugging function
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_CACHEFLUSH_H */
+86
View File
@@ -0,0 +1,86 @@
/* MN10300 Optimised checksumming code
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
extern __wsum csum_partial_copy_from_user(const void *src, void *dst,
int len, __wsum sum,
int *err_ptr);
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
extern __sum16 ip_compute_csum(const void *buff, int len);
#define csum_partial_copy_fromuser csum_partial_copy
extern __wsum csum_partial_copy(const void *src, void *dst, int len,
__wsum sum);
/* csum_fold - fold a 32-bit partial checksum down to 16 bits, with
 * end-around carry, and invert it to give the final ones-complement sum.
 * The add/addc pair is carry-flag dependent, hence the "cc" clobber. */
static inline __sum16 csum_fold(__wsum sum)
{
asm(
" add %1,%0 \n"
" addc 0xffff,%0 \n"
: "=r" (sum)
: "r" (sum << 16), "0" (sum & 0xffff0000)
: "cc"
);
return (~sum) >> 16;
}
/* csum_tcpudp_nofold - accumulate the TCP/UDP pseudo-header (addresses,
 * length, protocol) into @sum without folding; the caller still needs
 * csum_fold() to produce the final 16-bit checksum. */
static inline __wsum csum_tcpudp_nofold(unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__wsum tmp;
/* build the combined length/protocol word of the pseudo-header */
tmp = (__wsum) ntohs(len) << 16;
tmp += (__wsum) proto << 8;
asm(
" add %1,%0 \n"
" addc %2,%0 \n"
" addc %3,%0 \n"
" addc 0,%0 \n"
: "=r" (sum)
: "r" (daddr), "r"(saddr), "r"(tmp), "0"(sum)
: "cc"
);
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(unsigned long saddr,
					unsigned long daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	/* accumulate the pseudo-header, then fold to the final 16 bits */
	__wsum partial = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);

	return csum_fold(partial);
}
#undef _HAVE_ARCH_IPV6_CSUM
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void *dst, int len,
__wsum sum, int *err_ptr);
#endif /* _ASM_CHECKSUM_H */
+290
View File
@@ -0,0 +1,290 @@
/* MN10300 Core system registers
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_CPU_REGS_H
#define _ASM_CPU_REGS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#ifdef CONFIG_MN10300_CPU_AM33V2
/* we tell the compiler to pretend to be AM33 so that it doesn't try and use
* the FP regs, but tell the assembler that we're actually allowed AM33v2
* instructions */
#ifndef __ASSEMBLY__
asm(" .am33_2\n");
#else
.am33_2
#endif
#endif
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#define __SYSREG(ADDR, TYPE) (*(volatile TYPE *)(ADDR))
#define __SYSREGC(ADDR, TYPE) (*(const volatile TYPE *)(ADDR))
#else
#define __SYSREG(ADDR, TYPE) ADDR
#define __SYSREGC(ADDR, TYPE) ADDR
#endif
/* CPU registers */
#define EPSW_FLAG_Z 0x00000001 /* zero flag */
#define EPSW_FLAG_N 0x00000002 /* negative flag */
#define EPSW_FLAG_C 0x00000004 /* carry flag */
#define EPSW_FLAG_V 0x00000008 /* overflow flag */
#define EPSW_IM 0x00000700 /* interrupt mode */
#define EPSW_IM_0 0x00000000 /* interrupt mode 0 */
#define EPSW_IM_1 0x00000100 /* interrupt mode 1 */
#define EPSW_IM_2 0x00000200 /* interrupt mode 2 */
#define EPSW_IM_3 0x00000300 /* interrupt mode 3 */
#define EPSW_IM_4 0x00000400 /* interrupt mode 4 */
#define EPSW_IM_5 0x00000500 /* interrupt mode 5 */
#define EPSW_IM_6 0x00000600 /* interrupt mode 6 */
#define EPSW_IM_7 0x00000700 /* interrupt mode 7 */
#define EPSW_IE 0x00000800 /* interrupt enable */
#define EPSW_S 0x00003000 /* software auxilliary bits */
#define EPSW_T 0x00008000 /* trace enable */
#define EPSW_nSL 0x00010000 /* not supervisor level */
#define EPSW_NMID 0x00020000 /* nonmaskable interrupt disable */
#define EPSW_nAR 0x00040000 /* register bank control */
#define EPSW_ML 0x00080000 /* monitor level */
#define EPSW_FE 0x00100000 /* FPU enable */
/* FPU registers */
#define FPCR_EF_I 0x00000001 /* inexact result FPU exception flag */
#define FPCR_EF_U 0x00000002 /* underflow FPU exception flag */
#define FPCR_EF_O 0x00000004 /* overflow FPU exception flag */
#define FPCR_EF_Z 0x00000008 /* zero divide FPU exception flag */
#define FPCR_EF_V 0x00000010 /* invalid operand FPU exception flag */
#define FPCR_EE_I 0x00000020 /* inexact result FPU exception enable */
#define FPCR_EE_U 0x00000040 /* underflow FPU exception enable */
#define FPCR_EE_O 0x00000080 /* overflow FPU exception enable */
#define FPCR_EE_Z 0x00000100 /* zero divide FPU exception enable */
#define FPCR_EE_V 0x00000200 /* invalid operand FPU exception enable */
#define FPCR_EC_I 0x00000400 /* inexact result FPU exception cause */
#define FPCR_EC_U 0x00000800 /* underflow FPU exception cause */
#define FPCR_EC_O 0x00001000 /* overflow FPU exception cause */
#define FPCR_EC_Z 0x00002000 /* zero divide FPU exception cause */
#define FPCR_EC_V 0x00004000 /* invalid operand FPU exception cause */
#define FPCR_RM 0x00030000 /* rounding mode */
#define FPCR_RM_NEAREST 0x00000000 /* - round to nearest value */
#define FPCR_FCC_U 0x00040000 /* FPU unordered condition code */
#define FPCR_FCC_E 0x00080000 /* FPU equal condition code */
#define FPCR_FCC_G 0x00100000 /* FPU greater than condition code */
#define FPCR_FCC_L 0x00200000 /* FPU less than condition code */
#define FPCR_INIT 0x00000000 /* no exceptions, rounding to nearest */
/* CPU control registers */
#define CPUP __SYSREG(0xc0000020, u16) /* CPU pipeline register */
#define CPUP_DWBD 0x0020 /* write buffer disable flag */
#define CPUP_IPFD 0x0040 /* instruction prefetch disable flag */
#define CPUP_EXM 0x0080 /* exception operation mode */
#define CPUP_EXM_AM33V1 0x0000 /* - AM33 v1 exception mode */
#define CPUP_EXM_AM33V2 0x0080 /* - AM33 v2 exception mode */
#define CPUM __SYSREG(0xc0000040, u16) /* CPU mode register */
#define CPUM_SLEEP 0x0004 /* set to enter sleep state */
#define CPUM_HALT 0x0008 /* set to enter halt state */
#define CPUM_STOP 0x0010 /* set to enter stop state */
#define CPUREV __SYSREGC(0xc0000050, u32) /* CPU revision register */
#define CPUREV_TYPE 0x0000000f /* CPU type */
#define CPUREV_TYPE_S 0
#define CPUREV_TYPE_AM33V1 0x00000000 /* - AM33 V1 core, AM33/1.00 arch */
#define CPUREV_TYPE_AM33V2 0x00000001 /* - AM33 V2 core, AM33/2.00 arch */
#define CPUREV_TYPE_AM34V1 0x00000002 /* - AM34 V1 core, AM33/2.00 arch */
#define CPUREV_REVISION 0x000000f0 /* CPU revision */
#define CPUREV_REVISION_S 4
#define CPUREV_ICWAY 0x00000f00 /* number of instruction cache ways */
#define CPUREV_ICWAY_S 8
#define CPUREV_ICSIZE 0x0000f000 /* instruction cache way size */
#define CPUREV_ICSIZE_S 12
#define CPUREV_DCWAY 0x000f0000 /* number of data cache ways */
#define CPUREV_DCWAY_S 16
#define CPUREV_DCSIZE 0x00f00000 /* data cache way size */
#define CPUREV_DCSIZE_S 20
#define CPUREV_FPUTYPE 0x0f000000 /* FPU core type */
#define CPUREV_FPUTYPE_NONE 0x00000000 /* - no FPU core implemented */
#define CPUREV_OCDCTG 0xf0000000 /* on-chip debug function category */
#define DCR __SYSREG(0xc0000030, u16) /* Debug control register */
/* interrupt/exception control registers */
#define IVAR0 __SYSREG(0xc0000000, u16) /* interrupt vector 0 */
#define IVAR1 __SYSREG(0xc0000004, u16) /* interrupt vector 1 */
#define IVAR2 __SYSREG(0xc0000008, u16) /* interrupt vector 2 */
#define IVAR3 __SYSREG(0xc000000c, u16) /* interrupt vector 3 */
#define IVAR4 __SYSREG(0xc0000010, u16) /* interrupt vector 4 */
#define IVAR5 __SYSREG(0xc0000014, u16) /* interrupt vector 5 */
#define IVAR6 __SYSREG(0xc0000018, u16) /* interrupt vector 6 */
#define TBR __SYSREG(0xc0000024, u32) /* Trap table base */
#define TBR_TB 0xff000000 /* table base address bits 31-24 */
#define TBR_INT_CODE 0x00ffffff /* interrupt code */
#define DEAR __SYSREG(0xc0000038, u32) /* Data access exception address */
/* supervisor interrupt status register: one flag bit per exception source */
#define sISR			__SYSREG(0xc0000044, u32)	/* Supervisor interrupt status */
#define sISR_IRQICE		0x00000001	/* ICE interrupt */
#define sISR_ISTEP		0x00000002	/* single step interrupt */
#define sISR_MISSA		0x00000004	/* memory access address misalignment fault */
#define sISR_UNIMP		0x00000008	/* unimplemented instruction execution fault */
#define sISR_PIEXE		0x00000010	/* program interrupt */
#define sISR_MEMERR		0x00000020	/* illegal memory access fault */
#define sISR_IBREAK		0x00000040	/* instruction break interrupt */
#define sISR_DBSRL		0x00000080	/* debug serial interrupt */
#define sISR_PERIDB		0x00000100	/* peripheral debug interrupt */
#define sISR_EXUNIMP		0x00000200	/* unimplemented ex-instruction execution fault */
#define sISR_OBREAK		0x00000400	/* operand break interrupt */
#define sISR_PRIV		0x00000800	/* privileged instruction execution fault */
#define sISR_BUSERR		0x00001000	/* bus error fault */
#define sISR_DBLFT		0x00002000	/* double fault */
#define sISR_DBG		0x00008000	/* debug reserved interrupt */
#define sISR_ITMISS		0x00010000	/* instruction TLB miss */
#define sISR_DTMISS		0x00020000	/* data TLB miss */
#define sISR_ITEX		0x00040000	/* instruction TLB access exception */
#define sISR_DTEX		0x00080000	/* data TLB access exception */
#define sISR_ILGIA		0x00100000	/* illegal instruction access exception */
#define sISR_ILGDA		0x00200000	/* illegal data access exception */
#define sISR_IOIA		0x00400000	/* internal I/O space instruction access excep */
#define sISR_PRIVA		0x00800000	/* privileged space instruction access excep */
#define sISR_PRIDA		0x01000000	/* privileged space data access excep */
#define sISR_DISA		0x02000000	/* data space instruction access excep */
#define sISR_SYSC		0x04000000	/* system call instruction excep */
#define sISR_FPUD		0x08000000	/* FPU disabled excep */
#define sISR_FPUUI		0x10000000	/* FPU unimplemented instruction excep */
#define sISR_FPUOP		0x20000000	/* FPU operation excep */
#define sISR_NE			0x80000000	/* multiple synchronous exceptions excep */
/* cache control registers */
#define CHCTR __SYSREG(0xc0000070, u16) /* cache control */
#define CHCTR_ICEN 0x0001 /* instruction cache enable */
#define CHCTR_DCEN 0x0002 /* data cache enable */
#define CHCTR_ICBUSY 0x0004 /* instruction cache busy */
#define CHCTR_DCBUSY 0x0008 /* data cache busy */
#define CHCTR_ICINV 0x0010 /* instruction cache invalidate */
#define CHCTR_DCINV 0x0020 /* data cache invalidate */
#define CHCTR_DCWTMD 0x0040 /* data cache writing mode */
#define CHCTR_DCWTMD_WRBACK 0x0000 /* - write back mode */
#define CHCTR_DCWTMD_WRTHROUGH 0x0040 /* - write through mode */
#define CHCTR_DCALMD 0x0080 /* data cache allocation mode */
#define CHCTR_ICWMD 0x0f00 /* instruction cache way mode */
#define CHCTR_DCWMD 0xf000 /* data cache way mode */
/* MMU control registers */
#define MMUCTR __SYSREG(0xc0000090, u32) /* MMU control register */
#define MMUCTR_IRP 0x0000003f /* instruction TLB replace pointer */
#define MMUCTR_ITE 0x00000040 /* instruction TLB enable */
#define MMUCTR_IIV 0x00000080 /* instruction TLB invalidate */
#define MMUCTR_ITL 0x00000700 /* instruction TLB lock pointer */
#define MMUCTR_ITL_NOLOCK 0x00000000 /* - no lock */
#define MMUCTR_ITL_LOCK0 0x00000100 /* - entry 0 locked */
#define MMUCTR_ITL_LOCK0_1 0x00000200 /* - entry 0-1 locked */
#define MMUCTR_ITL_LOCK0_3 0x00000300 /* - entry 0-3 locked */
#define MMUCTR_ITL_LOCK0_7 0x00000400 /* - entry 0-7 locked */
#define MMUCTR_ITL_LOCK0_15 0x00000500 /* - entry 0-15 locked */
#define MMUCTR_CE 0x00008000 /* cacheable bit enable */
#define MMUCTR_DRP 0x003f0000 /* data TLB replace pointer */
#define MMUCTR_DTE 0x00400000 /* data TLB enable */
#define MMUCTR_DIV 0x00800000 /* data TLB invalidate */
#define MMUCTR_DTL 0x07000000 /* data TLB lock pointer */
#define MMUCTR_DTL_NOLOCK 0x00000000 /* - no lock */
#define MMUCTR_DTL_LOCK0 0x01000000 /* - entry 0 locked */
#define MMUCTR_DTL_LOCK0_1 0x02000000 /* - entry 0-1 locked */
#define MMUCTR_DTL_LOCK0_3 0x03000000 /* - entry 0-3 locked */
#define MMUCTR_DTL_LOCK0_7 0x04000000 /* - entry 0-7 locked */
#define MMUCTR_DTL_LOCK0_15 0x05000000 /* - entry 0-15 locked */
#define PIDR __SYSREG(0xc0000094, u16) /* PID register */
#define PIDR_PID 0x00ff /* process identifier */
#define PTBR __SYSREG(0xc0000098, unsigned long) /* Page table base register */
#define IPTEL __SYSREG(0xc00000a0, u32) /* instruction TLB entry */
#define DPTEL __SYSREG(0xc00000b0, u32) /* data TLB entry */
#define xPTEL_V 0x00000001 /* TLB entry valid */
#define xPTEL_UNUSED1 0x00000002 /* unused bit */
#define xPTEL_UNUSED2 0x00000004 /* unused bit */
#define xPTEL_C 0x00000008 /* cached if set */
#define xPTEL_PV 0x00000010 /* page valid */
#define xPTEL_D 0x00000020 /* dirty */
#define xPTEL_PR 0x000001c0 /* page protection */
#define xPTEL_PR_ROK 0x00000000 /* - R/O kernel */
#define xPTEL_PR_RWK 0x00000100 /* - R/W kernel */
#define xPTEL_PR_ROK_ROU 0x00000080 /* - R/O kernel and R/O user */
#define xPTEL_PR_RWK_ROU 0x00000180 /* - R/W kernel and R/O user */
#define xPTEL_PR_RWK_RWU 0x000001c0 /* - R/W kernel and R/W user */
#define xPTEL_G 0x00000200 /* global (use PID if 0) */
#define xPTEL_PS 0x00000c00 /* page size */
#define xPTEL_PS_4Kb 0x00000000 /* - 4Kb page */
#define xPTEL_PS_128Kb 0x00000400 /* - 128Kb page */
#define xPTEL_PS_1Kb 0x00000800 /* - 1Kb page */
#define xPTEL_PS_4Mb 0x00000c00 /* - 4Mb page */
#define xPTEL_PPN 0xfffff006 /* physical page number */
#define xPTEL_V_BIT 0 /* bit numbers corresponding to above masks */
#define xPTEL_UNUSED1_BIT 1
#define xPTEL_UNUSED2_BIT 2
#define xPTEL_C_BIT 3
#define xPTEL_PV_BIT 4
#define xPTEL_D_BIT 5
#define xPTEL_G_BIT 9
#define IPTEU __SYSREG(0xc00000a4, u32) /* instruction TLB virtual addr */
#define DPTEU __SYSREG(0xc00000b4, u32) /* data TLB virtual addr */
#define xPTEU_VPN 0xfffffc00 /* virtual page number */
#define xPTEU_PID 0x000000ff /* process identifier to which applicable */
#define IPTEL2 __SYSREG(0xc00000a8, u32) /* instruction TLB entry */
#define DPTEL2 __SYSREG(0xc00000b8, u32) /* data TLB entry */
#define xPTEL2_V 0x00000001 /* TLB entry valid */
#define xPTEL2_C 0x00000002 /* cacheable */
#define xPTEL2_PV 0x00000004 /* page valid */
#define xPTEL2_D 0x00000008 /* dirty */
#define xPTEL2_PR 0x00000070 /* page protection */
#define xPTEL2_PR_ROK 0x00000000 /* - R/O kernel */
#define xPTEL2_PR_RWK 0x00000040 /* - R/W kernel */
#define xPTEL2_PR_ROK_ROU 0x00000020 /* - R/O kernel and R/O user */
#define xPTEL2_PR_RWK_ROU 0x00000060 /* - R/W kernel and R/O user */
#define xPTEL2_PR_RWK_RWU 0x00000070 /* - R/W kernel and R/W user */
#define xPTEL2_G 0x00000080 /* global (use PID if 0) */
#define xPTEL2_PS 0x00000300 /* page size */
#define xPTEL2_PS_4Kb 0x00000000 /* - 4Kb page */
#define xPTEL2_PS_128Kb 0x00000100 /* - 128Kb page */
#define xPTEL2_PS_1Kb 0x00000200 /* - 1Kb page */
#define xPTEL2_PS_4Mb 0x00000300 /* - 4Mb page */
#define xPTEL2_PPN 0xfffffc00 /* physical page number */
#define MMUFCR __SYSREGC(0xc000009c, u32) /* MMU exception cause */
#define MMUFCR_IFC __SYSREGC(0xc000009c, u16) /* MMU instruction excep cause */
#define MMUFCR_DFC __SYSREGC(0xc000009e, u16) /* MMU data exception cause */
#define MMUFCR_xFC_TLBMISS 0x0001 /* TLB miss flag */
#define MMUFCR_xFC_INITWR 0x0002 /* initial write excep flag */
#define MMUFCR_xFC_PGINVAL 0x0004 /* page invalid excep flag */
#define MMUFCR_xFC_PROTVIOL 0x0008 /* protection violation excep flag */
#define MMUFCR_xFC_ACCESS 0x0010 /* access level flag */
#define MMUFCR_xFC_ACCESS_USR 0x0000 /* - user mode */
#define MMUFCR_xFC_ACCESS_SR 0x0010 /* - supervisor mode */
#define MMUFCR_xFC_TYPE 0x0020 /* access type flag */
#define MMUFCR_xFC_TYPE_READ 0x0000 /* - read */
#define MMUFCR_xFC_TYPE_WRITE 0x0020 /* - write */
#define MMUFCR_xFC_PR 0x01c0 /* page protection flag */
#define MMUFCR_xFC_PR_ROK 0x0000 /* - R/O kernel */
#define MMUFCR_xFC_PR_RWK 0x0100 /* - R/W kernel */
#define MMUFCR_xFC_PR_ROK_ROU 0x0080 /* - R/O kernel and R/O user */
#define MMUFCR_xFC_PR_RWK_ROU 0x0180 /* - R/W kernel and R/O user */
#define MMUFCR_xFC_PR_RWK_RWU 0x01c0 /* - R/W kernel and R/W user */
#define MMUFCR_xFC_ILLADDR 0x0200 /* illegal address excep flag */
#endif /* __KERNEL__ */
#endif /* _ASM_CPU_REGS_H */
+1
View File
@@ -0,0 +1 @@
#include <asm-generic/cputime.h>
+37
View File
@@ -0,0 +1,37 @@
/* MN10300 Current task structure accessor
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_CURRENT_H
#define _ASM_CURRENT_H
#include <linux/thread_info.h>
/*
 * dedicate E2 to keeping the current task pointer
 */
#ifdef CONFIG_MN10300_CURRENT_IN_E2
/* The current task pointer lives permanently in register E2.  The "used"
 * attribute stops the compiler discarding the declaration when it sees no
 * explicit reference. */
register struct task_struct *const current asm("e2") __attribute__((used));
#define get_current() current
extern struct task_struct *__current;
#else
/* Fallback: fetch the task pointer through the current thread_info block.
 * NOTE(review): __attribute__((const)) asserts the result never changes
 * within one context - presumably safe because thread_info is derived from
 * the kernel stack, but confirm against the thread_info implementation. */
static inline __attribute__((const))
struct task_struct *get_current(void)
{
	return current_thread_info()->task;
}
#define current get_current()
#endif
#endif /* _ASM_CURRENT_H */
+19
View File
@@ -0,0 +1,19 @@
/* MN10300 Uninterruptible delay routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H
/* busy-wait for approximately usecs microseconds */
extern void __udelay(unsigned long usecs);
/* busy-wait for the given number of raw delay-loop iterations */
extern void __delay(unsigned long loops);
/* udelay() simply forwards to __udelay(); the argument is passed straight
 * through as a single function argument, so lack of parentheses is benign */
#define udelay(n) __udelay(n)
#endif /* _ASM_DELAY_H */
+1
View File
@@ -0,0 +1 @@
#include <asm-generic/device.h>
+100
View File
@@ -0,0 +1,100 @@
/* MN10300 64-bit division
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_DIV64
#define _ASM_DIV64
#include <linux/types.h>
extern void ____unhandled_size_in_do_div___(void);
/*
 * divide n by base, leaving the result in n and returning the remainder
 * - we can do this quite efficiently on the MN10300 by cascading the divides
 *   through the MDR register
 * - n must be an lvalue no wider than 64 bits; the remainder is the value of
 *   the whole expression
 * - in the 64-bit case the two 32-bit halves are divided most-significant
 *   half first, with MDR carrying the running remainder between the two
 *   divu instructions
 */
#define do_div(n, base)						\
({								\
	unsigned __rem = 0;					\
	if (sizeof(n) <= 4) {					\
		asm("mov %1,mdr \n"				\
		    "divu %2,%0 \n"				\
		    "mov mdr,%1 \n"				\
		    : "+r"(n), "=d"(__rem)			\
		    : "r"(base), "1"(__rem)			\
		    : "cc"					\
		    );						\
	} else if (sizeof(n) <= 8) {				\
		union {						\
			unsigned long long l;			\
			u32 w[2];				\
		} __quot;					\
		__quot.l = n;					\
		asm("mov %0,mdr \n"	/* MDR = 0 */		\
		    "divu %3,%1 \n"				\
		    /* __quot.MSL = __div.MSL / base, */	\
		    /* MDR = MDR:__div.MSL % base */		\
		    "divu %3,%2 \n"				\
		    /* __quot.LSL = MDR:__div.LSL / base, */	\
		    /* MDR = MDR:__div.LSL % base */		\
		    "mov mdr,%0 \n"				\
		    : "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0]) \
		    : "r"(base), "0"(__rem), "1"(__quot.w[1]),	\
		      "2"(__quot.w[0])				\
		    : "cc"					\
		    );						\
		n = __quot.l;					\
	} else {						\
		____unhandled_size_in_do_div___();		\
	}							\
	__rem;							\
})
/*
 * do an unsigned 32-bit multiply and divide with intermediate 64-bit product
 * so as not to lose accuracy
 * - we use the MDR register to hold the MSW of the product
 * - computes (val * mult) / div; valid while the quotient fits in 32 bits
 */
static inline __attribute__((const))
unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
{
	unsigned result;

	/* NOTE(review): unlike do_div() above, this asm declares no "cc"
	 * clobber even though it uses the same mulu/divu instructions -
	 * confirm this omission is intentional. */
	asm("mulu %2,%0 \n"	/* MDR:val = val*mult */
	    "divu %3,%0 \n"	/* val = MDR:val/div;
				 * MDR = MDR:val%div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
	    );

	return result;
}
/*
 * do a signed 32-bit multiply and divide with intermediate 64-bit product so
 * as not to lose accuracy
 * - we use the MDR register to hold the MSW of the product
 * - computes (val * mult) / div; valid while the quotient fits in 32 bits
 */
static inline __attribute__((const))
signed __muldiv64s(signed val, signed mult, signed div)
{
	signed result;

	/* NOTE(review): as with __muldiv64u(), no "cc" clobber is declared
	 * here, unlike in do_div() - confirm this is intentional. */
	asm("mul %2,%0 \n"	/* MDR:val = val*mult */
	    "div %3,%0 \n"	/* val = MDR:val/div;
				 * MDR = MDR:val%div */
	    : "=r"(result)
	    : "0"(val), "ir"(mult), "r"(div)
	    );

	return result;
}
#endif /* _ASM_DIV64 */
+234
View File
@@ -0,0 +1,234 @@
/* DMA mapping routines for the MN10300 arch
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cache.h>
#include <asm/io.h>
extern void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag);
extern void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either pci_unmap_single or pci_dma_sync_single is performed.
 */
static inline
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* the CPU caches are not DMA-coherent here, so flush and invalidate
	 * the whole data cache before handing the buffer to the device */
	mn10300_dcache_flush_inv();
	return virt_to_bus(ptr);
}
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous pci_map_single call.  All other
 * usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	/* nothing to undo: dma_map_single() only flushed the cache and
	 * handed out a bus address */
	BUG_ON(direction == DMA_NONE);
}
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above pci_map_single interface.
 * Here the scatter gather list elements are each tagged with the appropriate
 * dma address and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of DMA
 *       address/length pairs than there are SG table elements.  (for example
 *       via virtual mapping capabilities) The routine returns the number of
 *       addr/length pairs actually used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are the same
 * here.
 */
static inline
int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		/* no IOMMU: each segment's DMA address is simply its
		 * physical address */
		sg->dma_address = sg_phys(sg);
	}

	/* flush the whole data cache so the device sees current memory */
	mn10300_dcache_flush_inv();
	return nents;
}
/*
* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
}
/*
* pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
* to pci_map_single, but takes a struct page instead of a virtual address
*/
static inline
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return page_to_bus(page) + offset;
}
static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
/*
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
*
* If you perform a pci_map_single() but wish to interrogate the buffer using
* the cpu, yet do not wish to teardown the PCI dma mapping, you must call this
* function before doing so. At the next point you give the PCI dma address
* back to the card, the device again owns the buffer.
*/
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
}
static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
mn10300_dcache_flush_inv();
}
static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
}
/* hand ownership of the range back to the device: flush the data cache so
 * the device sees anything the CPU wrote */
static inline
void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}
/*
* Make physical memory consistent for a set of streaming mode DMA translations
* after a transfer.
*
* The same as pci_dma_sync_single but for a scatter-gather list, same rules
* and usage.
*/
static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction)
{
}
static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction)
{
mn10300_dcache_flush_inv();
}
static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
/*
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA
	 */
	return (mask < 0x00ffffff) ? 0 : 1;
}
/* set the device's DMA mask, refusing masks we cannot actually support or
 * devices that provide no mask storage */
static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
static inline
int dma_get_cache_alignment(void)
{
return 1 << L1_CACHE_SHIFT;
}
#define dma_is_consistent(d) (1)
static inline
void dma_cache_sync(void *vaddr, size_t size,
enum dma_data_direction direction)
{
mn10300_dcache_flush_inv();
}
#endif
+118
View File
@@ -0,0 +1,118 @@
/* MN10300 ISA DMA handlers and definitions
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <asm/system.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <linux/delay.h>
#undef MAX_DMA_CHANNELS /* switch off linux/kernel/dma.c */
#define MAX_DMA_ADDRESS 0xbfffffff
extern spinlock_t dma_spin_lock;

/* take the global ISA DMA lock with interrupts disabled, returning the
 * saved IRQ flags for the matching release_dma_lock() call */
static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

/* drop the global ISA DMA lock and restore the IRQ flags saved by
 * claim_dma_lock() */
static inline void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* enable/disable a specific DMA channel */
static inline void enable_dma(unsigned int dmanr)
{
}
static inline void disable_dma(unsigned int dmanr)
{
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static inline void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static inline void set_dma_mode(unsigned int dmanr, char mode)
{
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
}
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static inline int get_dma_residue(unsigned int dmanr)
{
return 0;
}
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */

Some files were not shown because too many files have changed in this diff Show More