Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (108 commits)
  sh: Fix occasional flush_cache_4096() stack corruption.
  sh: Calculate shm alignment at runtime.
  sh: dma-mapping compile fixes.
  sh: Initial vsyscall page support.
  sh: Clean up PAGE_SIZE definition for assembly use.
  sh: Selective flush_cache_mm() flushing.
  sh: More intelligent entry_mask/way_size calculation.
  sh: Support for L2 cache on newer SH-4A CPUs.
  sh: Update kexec support for API changes.
  sh: Optimized readsl()/writesl() support.
  sh: Report movli.l/movco.l capabilities.
  sh: CPU flags in AT_HWCAP in ELF auxvt.
  sh: Add support for 4K stacks.
  sh: Enable /proc/kcore support.
  sh: stack debugging support.
  sh: select CONFIG_EMBEDDED.
  sh: machvec rework.
  sh: Solution Engine SH7343 board support.
  sh: SH7710VoIPGW board support.
  sh: Enable verbose BUG() support.
  ...
Author: Linus Torvalds
Date: 2006-09-27 08:49:07 -07:00
355 changed files with 20724 additions and 9021 deletions
+8
@@ -14,11 +14,19 @@
#include <asm/cpu/addrspace.h>
/* Memory segments (32bit Privileged mode addresses) */
#ifndef CONFIG_CPU_SH2A
#define P0SEG 0x00000000
#define P1SEG 0x80000000
#define P2SEG 0xa0000000
#define P3SEG 0xc0000000
#define P4SEG 0xe0000000
#else
#define P0SEG 0x00000000
#define P1SEG 0x00000000
#define P2SEG 0x20000000
#define P3SEG 0x00000000
#define P4SEG 0x80000000
#endif
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
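For readers less familiar with the SuperH address map, here is a minimal standalone sketch (illustrative only, not part of this diff; to_uncached is a hypothetical helper) of how these segment constants are typically used, e.g. turning a cached P1 address into its uncached P2 alias:

/* Illustrative only: remap a 29-bit physical-window address between
 * the cached (P1) and uncached (P2) privileged segments. */
#include <stdio.h>

#define P1SEG 0x80000000UL
#define P2SEG 0xa0000000UL
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000UL)

static unsigned long to_uncached(unsigned long p1addr)
{
        /* Keep the low 29 offset bits, substitute the P2 segment bits. */
        return (p1addr & 0x1fffffffUL) | P2SEG;
}

int main(void)
{
        unsigned long cached = 0x8c001000UL;    /* some P1 address */

        printf("segment base:   %#lx\n", PXSEG(cached));        /* 0x80000000 */
        printf("uncached alias: %#lx\n", to_uncached(cached));  /* 0xac001000 */
        return 0;
}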
-86
@@ -1,86 +0,0 @@
/*
* include/asm-sh/io_adx.h
*
* Copyright (C) 2001 A&D Co., Ltd.
*
* This file may be copied or modified under the terms of the GNU
* General Public License. See linux/COPYING for more information.
*
* IO functions for an A&D ADX Board
*/
#ifndef _ASM_SH_IO_ADX_H
#define _ASM_SH_IO_ADX_H
#include <asm/io_generic.h>
extern unsigned char adx_inb(unsigned long port);
extern unsigned short adx_inw(unsigned long port);
extern unsigned int adx_inl(unsigned long port);
extern void adx_outb(unsigned char value, unsigned long port);
extern void adx_outw(unsigned short value, unsigned long port);
extern void adx_outl(unsigned int value, unsigned long port);
extern unsigned char adx_inb_p(unsigned long port);
extern void adx_outb_p(unsigned char value, unsigned long port);
extern void adx_insb(unsigned long port, void *addr, unsigned long count);
extern void adx_insw(unsigned long port, void *addr, unsigned long count);
extern void adx_insl(unsigned long port, void *addr, unsigned long count);
extern void adx_outsb(unsigned long port, const void *addr, unsigned long count);
extern void adx_outsw(unsigned long port, const void *addr, unsigned long count);
extern void adx_outsl(unsigned long port, const void *addr, unsigned long count);
extern unsigned char adx_readb(unsigned long addr);
extern unsigned short adx_readw(unsigned long addr);
extern unsigned int adx_readl(unsigned long addr);
extern void adx_writeb(unsigned char b, unsigned long addr);
extern void adx_writew(unsigned short b, unsigned long addr);
extern void adx_writel(unsigned int b, unsigned long addr);
extern void * adx_ioremap(unsigned long offset, unsigned long size);
extern void adx_iounmap(void *addr);
extern unsigned long adx_isa_port2addr(unsigned long offset);
extern void setup_adx(void);
extern void init_adx_IRQ(void);
#ifdef __WANT_IO_DEF
#define __inb adx_inb
#define __inw adx_inw
#define __inl adx_inl
#define __outb adx_outb
#define __outw adx_outw
#define __outl adx_outl
#define __inb_p adx_inb_p
#define __inw_p adx_inw
#define __inl_p adx_inl
#define __outb_p adx_outb_p
#define __outw_p adx_outw
#define __outl_p adx_outl
#define __insb adx_insb
#define __insw adx_insw
#define __insl adx_insl
#define __outsb adx_outsb
#define __outsw adx_outsw
#define __outsl adx_outsl
#define __readb adx_readb
#define __readw adx_readw
#define __readl adx_readl
#define __writeb adx_writeb
#define __writew adx_writew
#define __writel adx_writel
#define __isa_port2addr adx_isa_port2addr
#define __ioremap adx_ioremap
#define __iounmap adx_iounmap
#endif
#endif /* _ASM_SH_IO_AANDD_H */
+46
@@ -0,0 +1,46 @@
/*
* Copyright 2006 (c) Andriy Skulysh <askulysh@gmail.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#ifndef __ASM_SH_APM_H
#define __ASM_SH_APM_H
#define APM_AC_OFFLINE 0
#define APM_AC_ONLINE 1
#define APM_AC_BACKUP 2
#define APM_AC_UNKNOWN 0xff
#define APM_BATTERY_STATUS_HIGH 0
#define APM_BATTERY_STATUS_LOW 1
#define APM_BATTERY_STATUS_CRITICAL 2
#define APM_BATTERY_STATUS_CHARGING 3
#define APM_BATTERY_STATUS_NOT_PRESENT 4
#define APM_BATTERY_STATUS_UNKNOWN 0xff
#define APM_BATTERY_LIFE_UNKNOWN 0xFFFF
#define APM_BATTERY_LIFE_MINUTES 0x8000
#define APM_BATTERY_LIFE_VALUE_MASK 0x7FFF
#define APM_BATTERY_FLAG_HIGH (1 << 0)
#define APM_BATTERY_FLAG_LOW (1 << 1)
#define APM_BATTERY_FLAG_CRITICAL (1 << 2)
#define APM_BATTERY_FLAG_CHARGING (1 << 3)
#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7)
#define APM_BATTERY_FLAG_UNKNOWN 0xff
#define APM_UNITS_MINS 0
#define APM_UNITS_SECS 1
#define APM_UNITS_UNKNOWN -1
extern int (*apm_get_info)(char *buf, char **start, off_t fpos, int length);
extern int apm_suspended;
void apm_queue_event(apm_event_t event);
#endif
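A small, hypothetical userspace-style sketch (not part of this diff) showing how the APM_BATTERY_FLAG_* bits above decode into a status string:

/* Illustrative only: map the APM_BATTERY_FLAG_* bits defined above to text. */
#include <stdio.h>

#define APM_BATTERY_FLAG_HIGH        (1 << 0)
#define APM_BATTERY_FLAG_LOW         (1 << 1)
#define APM_BATTERY_FLAG_CRITICAL    (1 << 2)
#define APM_BATTERY_FLAG_CHARGING    (1 << 3)
#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7)

static const char *battery_state(unsigned int flags)
{
        if (flags & APM_BATTERY_FLAG_NOT_PRESENT)
                return "absent";
        if (flags & APM_BATTERY_FLAG_CHARGING)
                return "charging";
        if (flags & APM_BATTERY_FLAG_CRITICAL)
                return "critical";
        if (flags & APM_BATTERY_FLAG_LOW)
                return "low";
        if (flags & APM_BATTERY_FLAG_HIGH)
                return "high";
        return "unknown";
}

int main(void)
{
        printf("%s\n", battery_state(APM_BATTERY_FLAG_CHARGING));      /* "charging" */
        return 0;
}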
+97 -9
@@ -14,6 +14,7 @@ typedef struct { volatile int counter; } atomic_t;
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#include <linux/compiler.h>
#include <asm/system.h>
/*
@@ -21,49 +22,110 @@ typedef struct { volatile int counter; } atomic_t;
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
static __inline__ void atomic_add(int i, atomic_t * v)
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_add \n"
" add %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;
local_irq_save(flags);
*(long *)v += i;
local_irq_restore(flags);
#endif
}
static __inline__ void atomic_sub(int i, atomic_t *v)
static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_sub \n"
" sub %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;
local_irq_save(flags);
*(long *)v -= i;
local_irq_restore(flags);
#endif
}
static __inline__ int atomic_add_return(int i, atomic_t * v)
/*
* SH-4A note:
*
* We basically get atomic_xxx_return() for free compared with
* atomic_xxx(). movli.l/movco.l require r0 due to the instruction
* encoding, so the retval is automatically set without having to
* do any special work.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long temp, flags;
unsigned long temp;
#ifdef CONFIG_CPU_SH4A
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_add_return \n"
" add %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;
local_irq_save(flags);
temp = *(long *)v;
temp += i;
*(long *)v = temp;
local_irq_restore(flags);
#endif
return temp;
}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static __inline__ int atomic_sub_return(int i, atomic_t * v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long temp, flags;
unsigned long temp;
#ifdef CONFIG_CPU_SH4A
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_sub_return \n"
" sub %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp), "=r" (&v->counter)
: "r" (i), "r" (&v->counter)
: "t");
#else
unsigned long flags;
local_irq_save(flags);
temp = *(long *)v;
temp -= i;
*(long *)v = temp;
local_irq_restore(flags);
#endif
return temp;
}
@@ -118,22 +180,48 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_clear_mask \n"
" and %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (~mask), "r" (&v->counter)
: "t");
#else
unsigned long flags;
local_irq_save(flags);
*(long *)v &= ~mask;
local_irq_restore(flags);
#endif
}
static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%3, %0 ! atomic_set_mask \n"
" or %2, %0 \n"
" movco.l %0, @%3 \n"
" bf 1b \n"
: "=&z" (tmp), "=r" (&v->counter)
: "r" (mask), "r" (&v->counter)
: "t");
#else
unsigned long flags;
local_irq_save(flags);
*(long *)v |= mask;
local_irq_restore(flags);
#endif
}
/* Atomic operations are already serializing on SH */
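The SH-4A note above describes a load-linked/store-conditional retry loop built from movli.l/movco.l. As a rough userspace analogue (illustrative only, not kernel code), the same read-modify-publish-retry shape can be written with a C11 compare-exchange loop:

/* Illustrative userspace analogue: read, modify, attempt to publish, and
 * retry if another CPU got there first -- the role played by the
 * "bf 1b" branch after movco.l in the assembly above. */
#include <stdatomic.h>
#include <stdio.h>

static int my_atomic_add_return(int i, atomic_int *v)
{
        int old, new;

        do {
                old = atomic_load_explicit(v, memory_order_relaxed);
                new = old + i;
        } while (!atomic_compare_exchange_weak(v, &old, new));

        return new;     /* like atomic_add_return(): the updated value */
}

int main(void)
{
        atomic_int counter = 10;

        printf("%d\n", my_atomic_add_return(5, &counter));     /* 15 */
        return 0;
}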
+14
@@ -1,4 +1,18 @@
#ifndef __ASM_SH_AUXVEC_H
#define __ASM_SH_AUXVEC_H
/*
* Architecture-neutral AT_ values in 0-17, leave some room
* for more of them.
*/
#ifdef CONFIG_VSYSCALL
/*
* Only define this in the vsyscall case, the entry point to
* the vsyscall page gets placed here. The kernel will attempt
* to build a gate VMA we don't care about otherwise..
*/
#define AT_SYSINFO_EHDR 33
#endif
#endif /* __ASM_SH_AUXVEC_H */
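Once the kernel exports AT_SYSINFO_EHDR, userspace can locate the vsyscall/vDSO page through the ELF auxiliary vector. A hedged sketch using glibc's getauxval() (available in glibc 2.16+; older code walks the aux vector past envp by hand):

/* Illustrative only: read the vsyscall/vDSO ELF header address advertised
 * by the kernel via AT_SYSINFO_EHDR. */
#include <sys/auxv.h>
#include <elf.h>
#include <stdio.h>

int main(void)
{
        unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);

        if (ehdr)
                printf("vDSO ELF header mapped at %#lx\n", ehdr);
        else
                printf("no AT_SYSINFO_EHDR entry in the aux vector\n");
        return 0;
}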
+8 -8
@@ -6,7 +6,7 @@
/* For __swab32 */
#include <asm/byteorder.h>
static __inline__ void set_bit(int nr, volatile void * addr)
static inline void set_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
@@ -24,7 +24,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
static __inline__ void clear_bit(int nr, volatile void * addr)
static inline void clear_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
@@ -37,7 +37,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
static __inline__ void change_bit(int nr, volatile void * addr)
static inline void change_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
@@ -50,7 +50,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
local_irq_restore(flags);
}
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
static inline int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
@@ -66,7 +66,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
return retval;
}
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
@@ -82,7 +82,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
return retval;
}
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
static inline int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
@@ -100,7 +100,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
#include <asm-generic/bitops/non-atomic.h>
static __inline__ unsigned long ffz(unsigned long word)
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
@@ -120,7 +120,7 @@ static __inline__ unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long __ffs(unsigned long word)
static inline unsigned long __ffs(unsigned long word)
{
unsigned long result;
+4
@@ -32,6 +32,10 @@ static void __init check_bugs(void)
case CPU_SH7750 ... CPU_SH4_501:
*p++ = '4';
break;
case CPU_SH7770 ... CPU_SH7781:
*p++ = '4';
*p++ = 'a';
break;
default:
*p++ = '?';
*p++ = '!';
+18 -12
@@ -10,7 +10,6 @@
#ifdef __KERNEL__
#include <asm/cpu/cache.h>
#include <asm/cpu/cacheflush.h>
#define SH_CACHE_VALID 1
#define SH_CACHE_UPDATED 2
@@ -23,24 +22,31 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
struct cache_info {
unsigned int ways;
unsigned int sets;
unsigned int linesz;
unsigned int ways; /* Number of cache ways */
unsigned int sets; /* Number of cache sets */
unsigned int linesz; /* Cache line size (bytes) */
unsigned int way_size; /* sets * line size */
/*
* way_incr is the address offset for accessing the next way
* in memory mapped cache array ops.
*/
unsigned int way_incr;
unsigned int entry_shift;
unsigned int entry_mask;
/*
* Compute a mask which selects the address bits which overlap between
* 1. those used to select the cache set during indexing
* 2. those in the physical page number.
*/
unsigned int alias_mask;
unsigned int n_aliases; /* Number of aliases */
unsigned long flags;
};
/* Flush (write-back only) a region (smaller than a page) */
extern void __flush_wback_region(void *start, int size);
/* Flush (write-back & invalidate) a region (smaller than a page) */
extern void __flush_purge_region(void *start, int size);
/* Flush (invalidate only) a region (smaller than a page) */
extern void __flush_invalidate_region(void *start, int size);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
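The comments above describe derived fields such as way_size and alias_mask. Below is a standalone sketch of the arithmetic they imply (illustrative only; the formulas are assumptions consistent with the comments, and the exact SH code may differ):

/* Illustrative only: derive cache_info-style fields from ways/sets/linesz. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct cache_info_example {
        unsigned int ways, sets, linesz;
        unsigned int way_size, entry_shift, entry_mask;
        unsigned int alias_mask, n_aliases;
};

static void derive(struct cache_info_example *c)
{
        c->way_size = c->sets * c->linesz;              /* bytes covered by one way */
        c->entry_shift = __builtin_ctz(c->linesz);      /* log2(line size) */
        c->entry_mask = (c->way_size - 1) & ~(c->linesz - 1);  /* set-index bits */
        /* Index bits that lie above the page offset can alias: */
        c->alias_mask = c->entry_mask & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->way_size >> PAGE_SHIFT) : 1;
}

int main(void)
{
        /* e.g. a 32 KB, 2-way cache with 32-byte lines: 16 KB per way */
        struct cache_info_example c = { .ways = 2, .sets = 512, .linesz = 32 };

        derive(&c);
        printf("way_size=%u alias_mask=%#x n_aliases=%u\n",
               c.way_size, c.alias_mask, c.n_aliases);
        return 0;
}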
+3
@@ -2,6 +2,7 @@
#define __ASM_SH_CACHEFLUSH_H
#ifdef __KERNEL__
#include <linux/mm.h>
#include <asm/cpu/cacheflush.h>
/* Flush (write-back only) a region (smaller than a page) */
@@ -27,5 +28,7 @@ extern void __flush_invalidate_region(void *start, int size);
memcpy(dst, src, len); \
} while (0)
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
-22
@@ -1,22 +0,0 @@
/*
* include/asm-sh/io_cat68701.h
*
* Copyright 2000 Stuart Menefy (stuart.menefy@st.com)
 * 2001 Yutarou Ebihara (ebihara@si-linux.com)
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
 * IO functions for an AONE Corp. CAT-68701 SH7708 Board
*/
#ifndef _ASM_SH_IO_CAT68701_H
#define _ASM_SH_IO_CAT68701_H
extern unsigned long cat68701_isa_port2addr(unsigned long offset);
extern int cat68701_irq_demux(int irq);
extern void init_cat68701_IRQ(void);
extern void heartbeat_cat68701(void);
#endif /* _ASM_SH_IO_CAT68701_H */
+2
@@ -159,6 +159,7 @@ static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
}
#define _HAVE_ARCH_IPV6_CSUM
#ifdef CONFIG_IPV6
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
struct in6_addr *daddr,
__u32 len,
@@ -194,6 +195,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
return csum_fold(sum);
}
#endif
/*
* Copy and checksum to user
+24
@@ -0,0 +1,24 @@
#ifndef __ASM_SH_CPU_FEATURES_H
#define __ASM_SH_CPU_FEATURES_H
/*
* Processor flags
*
* Note: When adding a new flag, keep cpu_flags[] in
* arch/sh/kernel/setup.c in sync so symbolic name
* mapping of the processor flags has a chance of being
* reasonably accurate.
*
* These flags are also available through the ELF
* auxiliary vector as AT_HWCAP.
*/
#define CPU_HAS_FPU 0x0001 /* Hardware FPU support */
#define CPU_HAS_P2_FLUSH_BUG 0x0002 /* Need to flush the cache in P2 area */
#define CPU_HAS_MMU_PAGE_ASSOC 0x0004 /* SH3: TLB way selection bit support */
#define CPU_HAS_DSP 0x0008 /* SH-DSP: DSP support */
#define CPU_HAS_PERF_COUNTER 0x0010 /* Hardware performance counters */
#define CPU_HAS_PTEA 0x0020 /* PTEA register */
#define CPU_HAS_LLSC 0x0040 /* movli.l/movco.l */
#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */
#endif /* __ASM_SH_CPU_FEATURES_H */
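Since the comment notes these flags are exported through AT_HWCAP, a hedged userspace check might look like the following (whether a particular kernel build actually sets the bit is an assumption here):

/* Illustrative only: test an exported CPU flag via the ELF aux vector. */
#include <sys/auxv.h>
#include <elf.h>
#include <stdio.h>

#define CPU_HAS_LLSC 0x0040     /* movli.l/movco.l, from the header above */

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("llsc: %s\n", (hwcap & CPU_HAS_LLSC) ? "yes" : "no");
        return 0;
}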
-16
@@ -1,16 +0,0 @@
/*
* include/asm-sh/cpu-sh2/shmparam.h
*
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_CPU_SH2_SHMPARAM_H
#define __ASM_CPU_SH2_SHMPARAM_H
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif /* __ASM_CPU_SH2_SHMPARAM_H */
+1 -3
@@ -26,12 +26,10 @@
#define CCR_CACHE_ENABLE CCR_CACHE_CE
#define CCR_CACHE_INVALIDATE CCR_CACHE_CF
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || defined(CONFIG_CPU_SUBTYPE_SH7710)
#define CCR3 0xa40000b4
#define CCR_CACHE_16KB 0x00010000
#define CCR_CACHE_32KB 0x00020000
#endif
#endif /* __ASM_CPU_SH3_CACHE_H */
+18 -38
@@ -10,7 +10,7 @@
#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
#define __ASM_CPU_SH3_CACHEFLUSH_H
/*
/*
* Cache flushing:
*
* - flush_cache_all() flushes entire cache
@@ -35,53 +35,33 @@
/* 32KB cache, 4kb PAGE sizes need to check bit 12 */
#define CACHE_ALIAS 0x00001000
struct page;
struct mm_struct;
struct vm_area_struct;
#define PG_mapped PG_arch_1
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
void flush_dcache_page(struct page *pg);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#endif
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
/* SH3 has unified cache so no special action needed here */
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define p3_cache_init() do { } while (0)
#define PG_mapped PG_arch_1
/* We provide our own get_unmapped_area to avoid cache alias issue */
#define HAVE_ARCH_UNMAPPED_AREA
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define p3_cache_init() do { } while (0)
#define HAVE_ARCH_UNMAPPED_AREA
#endif
#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
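flush_icache_range(), declared above, is typically called after writing instructions to memory so the I-cache sees them. A minimal in-kernel sketch (illustrative only; install_stub and its arguments are hypothetical):

/* Illustrative only: copy a code stub, then flush the range so the
 * instruction cache observes the new instructions. */
#include <linux/string.h>
#include <asm/cacheflush.h>

static void install_stub(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);                          /* lands in the D-cache */
        flush_icache_range((unsigned long)dst,          /* publish to the I-cache */
                           (unsigned long)dst + len);
}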
+4
@@ -18,5 +18,9 @@
#define MIN_DIVISOR_NR 0
#define MAX_DIVISOR_NR 4
#define FRQCR_CKOEN 0x0100
#define FRQCR_PLLEN 0x0080
#define FRQCR_PSTBY 0x0040
#endif /* __ASM_CPU_SH3_FREQ_H */
+6 -2
@@ -27,8 +27,12 @@
#define TRA 0xffffffd0
#define EXPEVT 0xffffffd4
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709) || \
defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7300) || \
defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7710)
#define INTEVT 0xa4000000 /* INTEVTE2(0xa4000000) */
#else
#define INTEVT 0xffffffd8
-16
@@ -1,16 +0,0 @@
/*
* include/asm-sh/cpu-sh3/shmparam.h
*
* Copyright (C) 1999 Niibe Yutaka
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_CPU_SH3_SHMPARAM_H
#define __ASM_CPU_SH3_SHMPARAM_H
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif /* __ASM_CPU_SH3_SHMPARAM_H */
+5 -3
@@ -20,9 +20,14 @@
* SH7710
* SH7720
* SH7300
* SH7710
* ---------------------------------------------------------------------------
*/
#if !defined(CONFIG_CPU_SUBTYPE_SH7727)
#define TMU_TOCR 0xfffffe90 /* Byte access */
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7710)
#define TMU_TSTR 0xa412fe92 /* Byte access */
@@ -39,9 +44,6 @@
#define TMU2_TCR 0xa412feb4 /* Word access */
#else
#if !defined(CONFIG_CPU_SUBTYPE_SH7727)
#define TMU_TOCR 0xfffffe90 /* Byte access */
#endif
#define TMU_TSTR 0xfffffe92 /* Byte access */
#define TMU0_TCOR 0xfffffe94 /* Long access */
+14 -1
@@ -11,6 +11,19 @@
#ifndef __ASM_CPU_SH3_UBC_H
#define __ASM_CPU_SH3_UBC_H
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
#define UBC_BARA 0xa4ffffb0
#define UBC_BAMRA 0xa4ffffb4
#define UBC_BBRA 0xa4ffffb8
#define UBC_BASRA 0xffffffe4
#define UBC_BARB 0xa4ffffa0
#define UBC_BAMRB 0xa4ffffa4
#define UBC_BBRB 0xa4ffffa8
#define UBC_BASRB 0xffffffe8
#define UBC_BDRB 0xa4ffff90
#define UBC_BDMRB 0xa4ffff94
#define UBC_BRCR 0xa4ffff98
#else
#define UBC_BARA 0xffffffb0
#define UBC_BAMRA 0xffffffb4
#define UBC_BBRA 0xffffffb8
@@ -22,6 +35,6 @@
#define UBC_BDRB 0xffffff90
#define UBC_BDMRB 0xffffff94
#define UBC_BRCR 0xffffff98
#endif
#endif /* __ASM_CPU_SH3_UBC_H */

Some files were not shown because too many files have changed in this diff.