Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
commit 1da177e4c3
Author: Linus Torvalds
Date:   2005-04-16 15:20:36 -07:00
17291 changed files with 6718755 additions and 0 deletions
@@ -0,0 +1,35 @@
#ifndef _ASM_IA64_A_OUT_H
#define _ASM_IA64_A_OUT_H
/*
* No a.out format has been (or should be) defined so this file is
* just a dummy that allows us to get binfmt_elf compiled. It
* probably would be better to clean up binfmt_elf.c so it does not
* necessarily depend on there being a.out support.
*
* Modified 1998-2002
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#include <linux/types.h>
struct exec {
unsigned long a_info;
unsigned long a_text;
unsigned long a_data;
unsigned long a_bss;
unsigned long a_entry;
};
#define N_TXTADDR(x) 0
#define N_DATADDR(x) 0
#define N_BSSADDR(x) 0
#define N_DRSIZE(x) 0
#define N_TRSIZE(x) 0
#define N_SYMSIZE(x) 0
#define N_TXTOFF(x) 0
#ifdef __KERNEL__
#include <asm/ustack.h>
#endif
#endif /* _ASM_IA64_A_OUT_H */
@@ -0,0 +1,17 @@
/*
* ia64/platform/hp/common/hp_acpi.h
*
* Copyright (C) 2003 Hewlett-Packard
* Copyright (C) Alex Williamson
* Copyright (C) Bjorn Helgaas
*
* Vendor specific extensions to ACPI.
*/
#ifndef _ASM_IA64_ACPI_EXT_H
#define _ASM_IA64_ACPI_EXT_H
#include <linux/types.h>
extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
#endif /* _ASM_IA64_ACPI_EXT_H */
@@ -0,0 +1,112 @@
/*
* asm-ia64/acpi.h
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/system.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
#define acpi_disabled 0 /* ACPI always enabled on IA64 */
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
static inline void disable_acpi(void) { }
const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
#ifdef CONFIG_ACPI_NUMA
/* Proximity bitmap length; _PXM is at most 255 (8 bit) */
#define MAX_PXM_DOMAINS (256)
extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
extern u16 ia64_acpiid_to_sapicid[];
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
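
The acquire/release helpers above implement the ACPI global-lock handshake with a compare-exchange retry loop. Below is a minimal userspace sketch of just the bit arithmetic, assuming the ACPI-specified layout in which bit 0 of the lock word is the "pending" flag and bit 1 is the "owned" flag; the function and values are illustrative, not part of the header.

/*
 * Hypothetical userspace sketch of the global-lock bit arithmetic used above.
 * Assumption (from the ACPI spec, not from this header): bit 0 of the lock
 * word is "pending", bit 1 is "owned".
 */
#include <stdio.h>

/* Same expression as in ia64_acpi_acquire_global_lock(), minus the cmpxchg loop. */
static unsigned int acquire_step(unsigned int old)
{
    /* clear owned/pending, set owned, and set pending iff it was already owned */
    return ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
}

int main(void)
{
    unsigned int free_lock = 0x0;   /* neither owned nor pending */
    unsigned int held_lock = 0x2;   /* already owned by another agent */

    /* 0x0 -> 0x2: we now own it; new < 3, so the kernel routine reports success. */
    printf("free: %#x -> %#x\n", free_lock, acquire_step(free_lock));

    /* 0x2 -> 0x3: still owned elsewhere, pending set; new >= 3, reported as "wait". */
    printf("held: %#x -> %#x\n", held_lock, acquire_step(held_lock));
    return 0;
}

With the lock free (0x0) the new value is 0x2 and the routine's "(new < 3) ? -1 : 0" reports success; with it already owned (0x2) the new value is 0x3, so the caller is marked pending and must wait for the owner to release the lock.
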
@@ -0,0 +1,21 @@
#ifndef _ASM_IA64_AGP_H
#define _ASM_IA64_AGP_H
/*
* IA-64 specific AGP definitions.
*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
* in coherent mode, which lets us map the AGP memory as normal (write-back) memory
* (unlike x86, where it gets mapped "write-coalescing").
*/
#define map_page_into_agp(page) /* nothing */
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb()
#endif /* _ASM_IA64_AGP_H */
@@ -0,0 +1,111 @@
#ifndef _ASM_IA64_ASMMACRO_H
#define _ASM_IA64_ASMMACRO_H
/*
* Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#define ENTRY(name) \
.align 32; \
.proc name; \
name:
#define ENTRY_MIN_ALIGN(name) \
.align 16; \
.proc name; \
name:
#define GLOBAL_ENTRY(name) \
.global name; \
ENTRY(name)
#define END(name) \
.endp name
/*
* Helper macros to make unwind directives more readable:
*/
/* prologue_gr: */
#define ASM_UNW_PRLG_RP 0x8
#define ASM_UNW_PRLG_PFS 0x4
#define ASM_UNW_PRLG_PSP 0x2
#define ASM_UNW_PRLG_PR 0x1
#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
/*
* Helper macros for accessing user memory.
*/
.section "__ex_table", "a" // declare section & section attributes
.previous
# define EX(y,x...) \
.xdata4 "__ex_table", 99f-., y-.; \
[99:] x
# define EXCLR(y,x...) \
.xdata4 "__ex_table", 99f-., y-.+4; \
[99:] x
/*
* Mark instructions that need a load of a virtual address patched to be
* a load of a physical address. We use this either in critical performance
* path (ivt.S - TLB miss processing) or in places where it might not be
* safe to use a "tpa" instruction (mca_asm.S - error recovery).
*/
.section ".data.patch.vtop", "a" // declare section & section attributes
.previous
#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
.xdata4 ".data.patch.vtop", 1b-.
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
* we'll patch out the work-around bundles with NOPs, so their impact is minimal.
*/
#define DO_MCKINLEY_E9_WORKAROUND
#ifdef DO_MCKINLEY_E9_WORKAROUND
.section ".data.patch.mckinley_e9", "a"
.previous
/* workaround for Itanium 2 Errata 9: */
# define FSYS_RETURN \
.xdata4 ".data.patch.mckinley_e9", 1f-.; \
1:{ .mib; \
nop.m 0; \
mov r16=ar.pfs; \
br.call.sptk.many b7=2f;; \
}; \
2:{ .mib; \
nop.m 0; \
mov ar.pfs=r16; \
br.ret.sptk.many b6;; \
}
#else
# define FSYS_RETURN br.ret.sptk.many b6
#endif
/*
* Up until early 2004, use of .align within a function caused bad unwind info.
* TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
* otherwise.
*/
#ifdef HAVE_WORKING_TEXT_ALIGN
# define TEXT_ALIGN(n) .align n
#else
# define TEXT_ALIGN(n)
#endif
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define dv_serialize_data .serialize.data
# define dv_serialize_instruction .serialize.instruction
#else
# define dv_serialize_data
# define dv_serialize_instruction
#endif
#endif /* _ASM_IA64_ASMMACRO_H */
@@ -0,0 +1,183 @@
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* NOTE: don't mess with the types below! The "unsigned long" and
* "int" types were carefully placed so as to ensure proper operation
* of the macros.
*
* Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/types.h>
#include <asm/intrinsics.h>
/*
* On IA-64, counter must always be volatile to ensure that the
* memory accesses are ordered.
*/
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
#define atomic_read(v) ((v)->counter)
#define atomic64_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i))
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
__s32 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
static __inline__ int
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
__s64 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old + i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
__s32 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
static __inline__ int
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
__s64 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic_add(__ia64_aar_i, v); \
})
#define atomic64_add_return(i,v) \
({ \
long __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
})
/*
* Atomically add I to V and return TRUE if the resulting value is
* negative.
*/
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
}
#define atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic_sub(__ia64_asr_i, v); \
})
#define atomic64_sub_return(i,v) \
({ \
long __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
#define atomic_add(i,v) atomic_add_return((i), (v))
#define atomic_sub(i,v) atomic_sub_return((i), (v))
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic64_add(i,v) atomic64_add_return((i), (v))
#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* _ASM_IA64_ATOMIC_H */
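
Every ia64_atomic_add/sub variant above follows the same pattern: read the counter, compute the new value, and retry the compare-exchange until no other CPU has modified the word in between; the *_return macros only bypass this loop for the small constants that fetchadd accepts (±1, ±4, ±8, ±16). Here is a minimal userspace analogue of that retry loop, assuming the GCC/Clang __atomic builtins in place of ia64_cmpxchg; the names are illustrative only.

/*
 * Userspace analogue of the retry loop in ia64_atomic_add() above, using the
 * GCC/Clang __atomic builtins instead of ia64_cmpxchg (assumption: a compiler
 * that provides __atomic_compare_exchange_n).
 */
#include <stdio.h>

static int atomic_add_return_sketch(int i, int *counter)
{
    int old, new_val;

    do {
        old = __atomic_load_n(counter, __ATOMIC_RELAXED);
        new_val = old + i;
        /* retry if another thread changed *counter between the load and the CAS */
    } while (!__atomic_compare_exchange_n(counter, &old, new_val,
                                          0 /* strong */, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    return new_val;
}

int main(void)
{
    int counter = 40;
    printf("%d\n", atomic_add_return_sketch(2, &counter));   /* prints 42 */
    return 0;
}
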
@@ -0,0 +1,410 @@
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
* scheduler patch
*/
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*
* The address must be (at least) "long" aligned.
* Note that there are drivers (e.g., eepro100) which use these operations to operate on
* hw-defined data-structures, so we can't easily change these operations to force a
* bigger alignment.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
static __inline__ void
set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
/*
* clear_bit() has "acquire" semantics.
*/
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* __clear_bit - Clears a bit in memory (non-atomic version)
*/
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
volatile __u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
*p &= ~m;
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __inline__ void
change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
} while (cmpxchg_acq(m, old, new) != old);
}
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
} while (cmpxchg_acq(m, old, new) != old);
return (old & bit) != 0;
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
int oldbitset = (*p & m) != 0;
*p |= m;
return oldbitset;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
__u32 mask, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
} while (cmpxchg_acq(m, old, new) != old);
return (old & ~mask) != 0;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
* If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
int oldbitset = *p & m;
*p &= ~m;
return oldbitset;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
__u32 bit, old, new;
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
} while (cmpxchg_acq(m, old, new) != old);
return (old & bit) != 0;
}
/*
* WARNING: non-atomic version.
*/
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
__u32 old, bit = (1 << (nr & 31));
__u32 *m = (__u32 *) addr + (nr >> 5);
old = *m;
*m = old ^ bit;
return (old & bit) != 0;
}
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
/**
* ffz - find the first zero bit in a long word
* @x: The long word to find the bit in
*
* Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if
* no zero exists, so code should check against ~0UL first...
*/
static inline unsigned long
ffz (unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x & (~x - 1));
return result;
}
/**
* __ffs - find first bit in word.
* @x: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long
__ffs (unsigned long x)
{
unsigned long result;
result = ia64_popcnt((x-1) & ~x);
return result;
}
#ifdef __KERNEL__
/*
* ia64_fls - find the position of the last (most significant) set bit in a 64 bit quantity
* @x: The value to search
*/
static inline unsigned long
ia64_fls (unsigned long x)
{
long double d = x;
long exp;
exp = ia64_getf_exp(d);
return exp - 0xffff;
}
static inline int
fls (int x)
{
return ia64_fls((unsigned int) x);
}
/*
* ffs: find first bit set. This is defined the same way as the libc and compiler builtin
* ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
* "int" values only and the result value is the bit number + 1. ffs(0) is defined to
* return zero.
*/
#define ffs(x) __builtin_ffs(x)
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
static __inline__ unsigned long
hweight64 (unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x);
return result;
}
#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
#endif /* __KERNEL__ */
extern int __find_next_zero_bit (const void *addr, unsigned long size,
unsigned long offset);
extern int __find_next_bit(const void *addr, unsigned long size,
unsigned long offset);
#define find_next_zero_bit(addr, size, offset) \
__find_next_zero_bit((addr), (size), (offset))
#define find_next_bit(addr, size, offset) \
__find_next_bit((addr), (size), (offset))
/*
* The optimizer actually generates good code for this case..
*/
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#ifdef __KERNEL__
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define ext2_set_bit test_and_set_bit
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
#define ext2_clear_bit test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
static inline int
sched_find_first_bit (unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return 64 + __ffs(b[1]);
return __ffs(b[2]) + 128;
}
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_BITOPS_H */
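
Two calculations recur throughout this header: set_bit() and friends locate the affected 32-bit word with nr >> 5 and build the mask with 1 << (nr & 31), while __ffs() and ffz() take the population count of a mask covering everything below the first interesting bit. A small userspace sketch, assuming __builtin_popcountl stands in for the ia64 popcnt intrinsic:

/*
 * Userspace sketch of two calculations from the header above (assumption:
 * GCC/Clang builtins stand in for the ia64 popcnt intrinsic).
 */
#include <stdio.h>

int main(void)
{
    /* Word/mask decomposition used by set_bit() and friends, for nr = 37. */
    int nr = 37;
    printf("word %d, mask %#010x\n", nr >> 5, 1u << (nr & 31));   /* word 1, mask 0x20 */

    /* __ffs(): popcount of the trailing-zero mask is the index of the lowest set bit. */
    unsigned long x = 0x50;                                        /* lowest set bit: 4 */
    printf("__ffs -> %d\n", __builtin_popcountl((x - 1) & ~x));    /* prints 4 */

    /* ffz(): the same trick on the complement finds the lowest clear bit. */
    unsigned long y = 0x0b;                                        /* lowest clear bit: 2 */
    printf("ffz   -> %d\n", __builtin_popcountl(y & (~y - 1)));    /* prints 2 */

    return 0;
}
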
@@ -0,0 +1,21 @@
#ifndef _ASM_IA64_BREAK_H
#define _ASM_IA64_BREAK_H
/*
* IA-64 Linux break numbers.
*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* OS-specific debug break numbers:
*/
#define __IA64_BREAK_KDB 0x80100
/*
* OS-specific break numbers:
*/
#define __IA64_BREAK_SYSCALL 0x100000
#endif /* _ASM_IA64_BREAK_H */
@@ -0,0 +1,15 @@
#ifndef _ASM_IA64_BUG_H
#define _ASM_IA64_BUG_H
#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
# define ia64_abort() __builtin_trap()
#else
# define ia64_abort() (*(volatile int *) 0 = 0)
#endif
#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
/* should this BUG be made generic? */
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
#endif
@@ -0,0 +1,19 @@
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*
* Based on <asm-alpha/bugs.h>.
*
* Modified 1998, 1999, 2003
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#ifndef _ASM_IA64_BUGS_H
#define _ASM_IA64_BUGS_H
#include <asm/processor.h>
extern void check_bugs (void);
#endif /* _ASM_IA64_BUGS_H */
@@ -0,0 +1,42 @@
#ifndef _ASM_IA64_BYTEORDER_H
#define _ASM_IA64_BYTEORDER_H
/*
* Modified 1998, 1999
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
#include <asm/types.h>
#include <asm/intrinsics.h>
#include <linux/compiler.h>
static __inline__ __attribute_const__ __u64
__ia64_swab64 (__u64 x)
{
__u64 result;
result = ia64_mux1(x, ia64_mux1_rev);
return result;
}
static __inline__ __attribute_const__ __u32
__ia64_swab32 (__u32 x)
{
return __ia64_swab64(x) >> 32;
}
static __inline__ __attribute_const__ __u16
__ia64_swab16(__u16 x)
{
return __ia64_swab64(x) >> 48;
}
#define __arch__swab64(x) __ia64_swab64(x)
#define __arch__swab32(x) __ia64_swab32(x)
#define __arch__swab16(x) __ia64_swab16(x)
#define __BYTEORDER_HAS_U64__
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_IA64_BYTEORDER_H */
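
__ia64_swab32() and __ia64_swab16() above reuse the full 64-bit byte reversal (mux1 with the reverse pattern) and simply shift the result down, since reversing the zero-extended value leaves the interesting bytes at the top of the register. A quick check of that identity, assuming __builtin_bswap64/__builtin_bswap32 as stand-ins for the intrinsic:

/*
 * Sketch of the "swab32 via swab64 >> 32" identity used above (assumption:
 * the __builtin_bswap* builtins stand in for the ia64 mux1 intrinsic).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t x = 0x11223344;

    /* Reversing all 8 bytes of the zero-extended value puts the interesting
     * 4 bytes at the top, so a shift by 32 yields the 32-bit byte swap. */
    uint32_t via64  = (uint32_t)(__builtin_bswap64((uint64_t)x) >> 32);
    uint32_t direct = __builtin_bswap32(x);

    printf("%#x %#x\n", via64, direct);   /* both print 0x44332211 */
    return 0;
}
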
@@ -0,0 +1,30 @@
#ifndef _ASM_IA64_CACHE_H
#define _ASM_IA64_CACHE_H
#include <linux/config.h>
/*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/* Bytes per L1 (data) cache line. */
#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
# define SMP_CACHE_BYTES L1_CACHE_BYTES
#else
/*
* The "aligned" directive can only _increase_ alignment, so this is
* safe and provides an easy way to avoid wasting space on a
* uni-processor:
*/
# define SMP_CACHE_SHIFT 3
# define SMP_CACHE_BYTES (1 << 3)
#endif
#endif /* _ASM_IA64_CACHE_H */
@@ -0,0 +1,50 @@
#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H
/*
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/page-flags.h>
#include <asm/bitops.h>
#include <asm/page.h>
/*
* Cache flushing routines. These are the kind of operations that can be very expensive, so try
* to avoid them whenever possible.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &(page)->flags); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
extern void flush_icache_range (unsigned long start, unsigned long end);
#define flush_icache_user_range(vma, page, user_addr, len) \
do { \
unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* _ASM_IA64_CACHEFLUSH_H */
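
flush_icache_user_range() above turns a user virtual address into the matching kernel address by adding the in-page offset, user_addr & ~PAGE_MASK, to page_address(page). A small sketch of that arithmetic, assuming a 16KB page size and made-up addresses for illustration:

/*
 * Sketch of the offset arithmetic in flush_icache_user_range() above
 * (assumption: a 16KB page and made-up addresses).
 */
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE ((uint64_t)16384)            /* assumed page size for the example */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t user_addr   = 0x200000000ULL + 0x1234; /* made-up user address */
    uint64_t kernel_page = 0xe000000012340000ULL;   /* stand-in for page_address(page) */

    uint64_t offset      = user_addr & ~PAGE_MASK;  /* offset within the page: 0x1234 */
    uint64_t kernel_addr = kernel_page + offset;    /* same byte, via the kernel mapping */

    printf("offset 0x%" PRIx64 ", kernel addr 0x%" PRIx64 "\n", offset, kernel_addr);
    return 0;
}
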
@@ -0,0 +1,76 @@
#ifndef _ASM_IA64_CHECKSUM_H
#define _ASM_IA64_CHECKSUM_H
/*
* Modified 1998, 1999
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
/*
* Computes the checksum of the TCP/UDP pseudo-header and returns a 16-bit
* checksum, already complemented
*/
extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum);
extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum);
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern unsigned int csum_partial (const unsigned char * buff, int len,
unsigned int sum);
/*
* Same as csum_partial, but copies from src while it checksums.
*
* Here it is even more important to align src and dst on a 32-bit (or
* even better 64-bit) boundary.
*/
extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
int len, unsigned int sum,
int *errp);
extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
int len, unsigned int sum);
/*
* This routine is used for miscellaneous IP-like checksums, mainly in
* icmp.c
*/
extern unsigned short ip_compute_csum (unsigned char *buff, int len);
/*
* Fold a partial checksum without adding pseudo headers.
*/
static inline unsigned short
csum_fold (unsigned int sum)
{
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return ~sum;
}
#endif /* _ASM_IA64_CHECKSUM_H */
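
csum_fold() above collapses a 32-bit partial sum into the final 16-bit ones-complement checksum by folding the upper half into the lower half twice, the second fold absorbing any carry produced by the first. A standalone restatement with a worked value (an illustrative copy, not the kernel symbol):

/*
 * Standalone restatement of csum_fold() above, with a worked example
 * (assumption: this is an illustrative copy, not the kernel's function).
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);   /* fold the high half into the low half */
    sum = (sum & 0xffff) + (sum >> 16);   /* a second fold absorbs any carry-out */
    return (uint16_t)~sum;                /* ones-complement the result */
}

int main(void)
{
    /* 0x2ffff: 0xffff + 0x2 = 0x10001, then 0x1 + 0x1 = 0x2, complemented to 0xfffd */
    printf("%#x\n", fold(0x2ffff));
    return 0;
}
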
@@ -0,0 +1,198 @@
#ifndef _ASM_IA64_COMPAT_H
#define _ASM_IA64_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_key_t;
typedef s32 compat_pid_t;
typedef u16 compat_uid_t;
typedef u16 compat_gid_t;
typedef u32 compat_uid32_t;
typedef u32 compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
compat_uid_t st_uid;
compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_spare[5];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
compat_uid32_t uid;
compat_gid32_t gid;
compat_uid32_t cuid;
compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
* A pointer passed in from user mode. This should not be used for syscall parameters,
* just declare them as pointers because the syscall entry code will have appropriately
* converted them already.
*/
typedef u32 compat_uptr_t;
static inline void __user *
compat_ptr (compat_uptr_t uptr)
{
return (void __user *) (unsigned long) uptr;
}
static __inline__ void __user *
compat_alloc_user_space (long len)
{
struct pt_regs *regs = ia64_task_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
}
#endif /* _ASM_IA64_COMPAT_H */
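
compat_alloc_user_space() above carves scratch space out of the 32-bit task's user stack: it truncates the saved stack pointer (r12) to 32 bits, rounds down to a 16-byte boundary, and subtracts the requested length. The same arithmetic on a made-up stack pointer value:

/*
 * Arithmetic sketch of compat_alloc_user_space() above
 * (assumption: a made-up 32-bit stack pointer value).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t r12 = 0x00000000ffffd01bULL;   /* stand-in for the saved user sp */
    long len = 40;

    /* truncate to 32 bits, align down to 16 bytes, then make room for len bytes */
    uint64_t scratch = ((r12 & 0xffffffff) & (uint64_t)-16) - len;

    printf("scratch area at %#llx\n", (unsigned long long)scratch);   /* 0xffffcfe8 */
    return 0;
}
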
@@ -0,0 +1,22 @@
#ifndef _ASM_IA64_CPU_H_
#define _ASM_IA64_CPU_H_
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/percpu.h>
struct ia64_cpu {
struct cpu cpu;
};
DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
DECLARE_PER_CPU(int, cpu_state);
extern int arch_register_cpu(int num);
#ifdef CONFIG_HOTPLUG_CPU
extern void arch_unregister_cpu(int);
#endif
#endif /* _ASM_IA64_CPU_H_ */
@@ -0,0 +1,6 @@
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __IA64_CPUTIME_H */
@@ -0,0 +1,17 @@
#ifndef _ASM_IA64_CURRENT_H
#define _ASM_IA64_CURRENT_H
/*
* Modified 1998-2000
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
#include <asm/intrinsics.h>
/*
* In kernel mode, thread pointer (r13) is used to point to the current task
* structure.
*/
#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
#endif /* _ASM_IA64_CURRENT_H */
@@ -0,0 +1,15 @@
#ifndef ASM_IA64_CYCLONE_H
#define ASM_IA64_CYCLONE_H
#ifdef CONFIG_IA64_CYCLONE
extern int use_cyclone;
extern void __init cyclone_setup(void);
#else /* CONFIG_IA64_CYCLONE */
#define use_cyclone 0
static inline void cyclone_setup(void)
{
printk(KERN_ERR "Cyclone Counter: System not configured"
" w/ CONFIG_IA64_CYCLONE.\n");
}
#endif /* CONFIG_IA64_CYCLONE */
#endif /* !ASM_IA64_CYCLONE_H */
@@ -0,0 +1,97 @@
#ifndef _ASM_IA64_DELAY_H
#define _ASM_IA64_DELAY_H
/*
* Delay routines using a pre-computed "cycles/usec" value.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/compiler.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
static __inline__ void
ia64_set_itm (unsigned long val)
{
ia64_setreg(_IA64_REG_CR_ITM, val);
ia64_srlz_d();
}
static __inline__ unsigned long
ia64_get_itm (void)
{
unsigned long result;
result = ia64_getreg(_IA64_REG_CR_ITM);
ia64_srlz_d();
return result;
}
static __inline__ void
ia64_set_itv (unsigned long val)
{
ia64_setreg(_IA64_REG_CR_ITV, val);
ia64_srlz_d();
}
static __inline__ unsigned long
ia64_get_itv (void)
{
return ia64_getreg(_IA64_REG_CR_ITV);
}
static __inline__ void
ia64_set_itc (unsigned long val)
{
ia64_setreg(_IA64_REG_AR_ITC, val);
ia64_srlz_d();
}
static __inline__ unsigned long
ia64_get_itc (void)
{
unsigned long result;
result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
#ifdef CONFIG_ITANIUM
while (unlikely((__s32) result == -1)) {
result = ia64_getreg(_IA64_REG_AR_ITC);
ia64_barrier();
}
#endif
return result;
}
extern void ia64_delay_loop (unsigned long loops);
static __inline__ void
__delay (unsigned long loops)
{
if (unlikely(loops < 1))
return;
ia64_delay_loop (loops - 1);
}
static __inline__ void
udelay (unsigned long usecs)
{
unsigned long start = ia64_get_itc();
unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
while (ia64_get_itc() - start < cycles)
cpu_relax();
}
#endif /* _ASM_IA64_DELAY_H */
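
udelay() above is a calibrated busy-wait: the requested microseconds are converted to ITC cycles via cyc_per_usec, and the loop spins until the cycle counter has advanced that far. A userspace analogue, assuming CLOCK_MONOTONIC nanoseconds as a stand-in for the ITC and the per-CPU calibration:

/*
 * Userspace analogue of the udelay() busy-wait above (assumption:
 * CLOCK_MONOTONIC nanoseconds stand in for ITC cycles and cyc_per_usec).
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void udelay_sketch(unsigned long usecs)
{
    uint64_t start = now_ns();
    uint64_t span  = (uint64_t)usecs * 1000;   /* "cycles" to wait, here just ns */

    while (now_ns() - start < span)
        ;                                      /* spin, like cpu_relax() in the kernel loop */
}

int main(void)
{
    udelay_sketch(500);                        /* burn roughly half a millisecond */
    puts("done");
    return 0;
}
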

Some files were not shown because too many files have changed in this diff.