mirror of https://github.com/linux-apfs/linux-apfs.git

Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git

+1 -1
@@ -329,7 +329,7 @@ menu "Power management and ACPI"

 config PM
 	bool "Power Management support"
-	depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB
+	depends on !IA64_HP_SIM
 	default y
 	help
 	  "Power Management" means that parts of your computer are shut
@@ -574,6 +574,8 @@ CONFIG_SERIAL_NONSTANDARD=y
 # CONFIG_N_HDLC is not set
 # CONFIG_STALDRV is not set
 CONFIG_SGI_SNSC=y
+CONFIG_SGI_TIOCX=y
+CONFIG_SGI_MBCS=m
 
 #
 # Serial drivers
@@ -1,9 +1,9 @@
 /*
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
-** (c) Copyright 2002-2004 Alex Williamson
+** (c) Copyright 2002-2005 Alex Williamson
 ** (c) Copyright 2002-2003 Grant Grundler
-** (c) Copyright 2002-2004 Hewlett-Packard Company
+** (c) Copyright 2002-2005 Hewlett-Packard Company
 **
 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
@@ -459,21 +459,32 @@ get_iovp_order (unsigned long size)
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @bits_wanted: number of entries we need.
+ * @use_hint: use res_hint to indicate where to start looking
  *
  * Find consecutive free bits in resource bitmap.
  * Each bit represents one entry in the IO Pdir.
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 {
-	unsigned long *res_ptr = ioc->res_hint;
+	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long pide = ~0UL;
+	unsigned long flags, pide = ~0UL;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	spin_lock_irqsave(&ioc->res_lock, flags);
+
+	/* Allow caller to force a search through the entire resource space */
+	if (likely(use_hint)) {
+		res_ptr = ioc->res_hint;
+	} else {
+		res_ptr = (ulong *)ioc->res_map;
+		ioc->res_bitshift = 0;
+	}
+
 	/*
 	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
 	 * if a TLB entry is purged while in use.  sba_mark_invalid()
@@ -570,10 +581,12 @@ not_found:
 	prefetch(ioc->res_map);
 	ioc->res_hint = (unsigned long *) ioc->res_map;
 	ioc->res_bitshift = 0;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 
 found_it:
 	ioc->res_hint = res_ptr;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 }
 
@@ -594,36 +607,36 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	unsigned long itc_start;
 #endif
 	unsigned long pide;
-	unsigned long flags;
 
 	ASSERT(pages_needed);
 	ASSERT(0 == (size & ~iovp_mask));
 
-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #ifdef PDIR_SEARCH_TIMING
 	itc_start = ia64_get_itc();
 #endif
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed);
+	pide = sba_search_bitmap(ioc, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed);
+		pide = sba_search_bitmap(ioc, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
+			unsigned long flags;

 			/*
 			** With delayed resource freeing, we can give this one more shot.  We're
 			** getting close to being in trouble here, so do what we can to make this
 			** one count.
 			*/
-			spin_lock(&ioc->saved_lock);
+			spin_lock_irqsave(&ioc->saved_lock, flags);
 			if (ioc->saved_cnt > 0) {
 				struct sba_dma_pair *d;
 				int cnt = ioc->saved_cnt;
 
-				d = &(ioc->saved[ioc->saved_cnt]);
+				d = &(ioc->saved[ioc->saved_cnt - 1]);
 
+				spin_lock(&ioc->res_lock);
 				while (cnt--) {
 					sba_mark_invalid(ioc, d->iova, d->size);
 					sba_free_range(ioc, d->iova, d->size);
@@ -631,10 +644,11 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 				}
 				ioc->saved_cnt = 0;
 				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+				spin_unlock(&ioc->res_lock);
 			}
-			spin_unlock(&ioc->saved_lock);
+			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed);
+			pide = sba_search_bitmap(ioc, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -664,8 +678,6 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
-	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
 	return (pide);
 }
 
@@ -950,6 +962,30 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	return SBA_IOVA(ioc, iovp, offset);
 }
 
+#ifdef ENABLE_MARK_CLEAN
+static SBA_INLINE void
+sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+	u32 iovp = (u32) SBA_IOVP(ioc,iova);
+	int off = PDIR_INDEX(iovp);
+	void *addr;
+
+	if (size <= iovp_size) {
+		addr = phys_to_virt(ioc->pdir_base[off] &
+				    ~0xE000000000000FFFULL);
+		mark_clean(addr, size);
+	} else {
+		do {
+			addr = phys_to_virt(ioc->pdir_base[off] &
+					    ~0xE000000000000FFFULL);
+			mark_clean(addr, min(size, iovp_size));
+			off++;
+			size -= iovp_size;
+		} while (size > 0);
+	}
+}
+#endif
+
 /**
  * sba_unmap_single - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
@@ -995,6 +1031,10 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	size += offset;
 	size = ROUNDUP(size, iovp_size);
 
+#ifdef ENABLE_MARK_CLEAN
+	if (dir == DMA_FROM_DEVICE)
+		sba_mark_clean(ioc, iova, size);
+#endif
+
 #if DELAYED_RESOURCE_CNT > 0
 	spin_lock_irqsave(&ioc->saved_lock, flags);
@@ -1021,30 +1061,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
-	if (dir == DMA_FROM_DEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc,iova);
-		int off = PDIR_INDEX(iovp);
-		void *addr;
-
-		if (size <= iovp_size) {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-					    ~0xE000000000000FFFULL);
-			mark_clean(addr, size);
-		} else {
-			size_t byte_cnt = size;
-
-			do {
-				addr = phys_to_virt(ioc->pdir_base[off] &
-						    ~0xE000000000000FFFULL);
-				mark_clean(addr, min(byte_cnt, iovp_size));
-				off++;
-				byte_cnt -= iovp_size;
-
-			} while (byte_cnt > 0);
-		}
-	}
-#endif
 }
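The sba_search_bitmap() change above moves the resource lock into the search itself and adds a use_hint flag: the common case starts scanning at the position where the last allocation ended, and sba_alloc_range() retries once from the very beginning before giving up. A minimal user-space sketch of that hint-plus-fallback allocation pattern (all names hypothetical, a byte array standing in for the PDIR bitmap, no locking shown):

#include <stdio.h>

#define NBITS 64
static unsigned char bitmap[NBITS];	/* one byte per IO PDIR entry */
static int res_hint;			/* where the last search ended */

/* find 'want' consecutive free slots; start at the hint unless told not to */
static long search_bitmap(int want, int use_hint)
{
	int start = use_hint ? res_hint : 0;

	for (int i = start; i + want <= NBITS; i++) {
		int ok = 1;
		for (int j = 0; j < want; j++)
			if (bitmap[i + j]) { ok = 0; break; }
		if (ok) {
			for (int j = 0; j < want; j++)
				bitmap[i + j] = 1;
			res_hint = i + want;	/* remember for next time */
			return i;
		}
	}
	return -1;
}

static long alloc_range(int want)
{
	long pide = search_bitmap(want, 1);	/* fast path: from the hint */
	if (pide < 0)
		pide = search_bitmap(want, 0);	/* slow path: full rescan */
	return pide;	/* caller treats -1 as exhaustion */
}

int main(void)
{
	printf("first:  %ld\n", alloc_range(8));
	printf("second: %ld\n", alloc_range(8));
	return 0;
}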
@@ -728,12 +728,8 @@ ENTRY(ia64_leave_syscall)
 	mov f8=f0		// clear f8
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
-	mov.m ar.ssd=r0		// M2 clear ar.ssd
+	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
 	;;
 	ld8 r25=[r3],16		// M0|1 load ar.unat
-	mov.m ar.csd=r0		// M2 clear ar.csd
-	mov r22=r0		// clear r22
-	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-	;;
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
@@ -756,11 +752,15 @@ ENTRY(ia64_leave_syscall)
 	mov f7=f0		// clear f7
 	;;
 	ld8.fill r12=[r2]	// restore r12 (sp)
+	mov.m ar.ssd=r0		// M2 clear ar.ssd
+	mov r22=r0		// clear r22
+
 	ld8.fill r15=[r3]	// restore r15
-(pUStk)	st1 [r14]=r17
 	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
 	;;
-(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
+(pUStk)	st1 [r14]=r17
+(pUStk)	ld4 r17=[r3]		// r17 = cpu_data->phys_stacked_size_p8
+	mov.m ar.csd=r0		// M2 clear ar.csd
 	mov b6=r18		// I0  restore b6
 	;;
 	mov r14=r0		// clear r14
+275 -89
File diff suppressed because it is too large
@@ -63,20 +63,30 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
 
 int
-assign_irq_vector (int irq)
+assign_irq_vector_nopanic (int irq)
 {
 	int pos, vector;
 again:
 	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
 	vector = IA64_FIRST_DEVICE_VECTOR + pos;
 	if (vector > IA64_LAST_DEVICE_VECTOR)
-		/* XXX could look for sharable vectors instead of panic'ing... */
-		panic("assign_irq_vector: out of interrupt vectors!");
+		return -1;
 	if (test_and_set_bit(pos, ia64_vector_mask))
 		goto again;
 	return vector;
 }
 
+int
+assign_irq_vector (int irq)
+{
+	int vector = assign_irq_vector_nopanic(irq);
+
+	if (vector < 0)
+		panic("assign_irq_vector: out of interrupt vectors!");
+
+	return vector;
+}
+
 void
 free_irq_vector (int vector)
 {
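The hunk above splits assign_irq_vector() into a fallible core, assign_irq_vector_nopanic(), which reports exhaustion with -1, and a thin wrapper that preserves the old panic-on-exhaustion behaviour for existing callers. A minimal stand-alone sketch of that refactoring pattern (all names hypothetical, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

#define NVEC 4
static int vector_used[NVEC];

static int alloc_vector_nopanic(void)
{
	for (int i = 0; i < NVEC; i++)
		if (!vector_used[i]) {
			vector_used[i] = 1;
			return i;	/* success: a free vector */
		}
	return -1;			/* let the caller decide how to fail */
}

static int alloc_vector(void)
{
	int v = alloc_vector_nopanic();

	if (v < 0) {			/* legacy behaviour: hard failure */
		fprintf(stderr, "out of vectors!\n");
		abort();
	}
	return v;
}

int main(void)
{
	printf("got vector %d\n", alloc_vector());
	return 0;
}

New callers (for example probe paths that can back out) use the _nopanic variant and propagate the error instead of taking the whole machine down.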
+28 -31
@@ -479,14 +479,6 @@ typedef struct {
 
 #define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
 
-typedef struct {
-	int	debug;		/* turn on/off debugging via syslog */
-	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
-	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
-	int	expert_mode;	/* turn on/off value checking */
-	int	debug_pfm_read;
-} pfm_sysctl_t;
-
 typedef struct {
 	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
 	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
 static pmu_config_t		*pmu_conf;
 
 /* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
 	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -1576,7 +1568,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		goto abort_locked;
 	}
 
-	DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
 
 	ret = -EFAULT;
 	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3695,8 +3687,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	pfm_sysctl.debug = m == 0 ? 0 : 1;
 
-	pfm_debug_var = pfm_sysctl.debug;
-
 	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
 
 	if (m == 0) {
@@ -4996,13 +4986,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }
 
 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
 
+/*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The down_interruptible
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
 void
 pfm_handle_work(void)
 {
 	pfm_context_t *ctx;
 	struct pt_regs *regs;
-	unsigned long flags;
+	unsigned long flags, dummy_flags;
 	unsigned long ovfl_regs;
 	unsigned int reason;
 	int ret;
@@ -5039,18 +5037,15 @@ pfm_handle_work(void)
 	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
 	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
 
+	/*
+	 * restore interrupt mask to what it was on entry.
+	 * Could be enabled/diasbled.
+	 */
 	UNPROTECT_CTX(ctx, flags);
 
-	/*
-	 * pfm_handle_work() is currently called with interrupts disabled.
-	 * The down_interruptible call may sleep, therefore we
-	 * must re-enable interrupts to avoid deadlocks. It is
-	 * safe to do so because this function is called ONLY
-	 * when returning to user level (PUStk=1), in which case
-	 * there is no risk of kernel stack overflow due to deep
-	 * interrupt nesting.
-	 */
-	BUG_ON(flags & IA64_PSR_I);
+	/*
+	 * force interrupt enable because of down_interruptible()
+	 */
 	local_irq_enable();
 
 	DPRINT(("before block sleeping\n"));
@@ -5064,12 +5059,12 @@ pfm_handle_work(void)
 	DPRINT(("after block sleeping ret=%d\n", ret));
 
 	/*
-	 * disable interrupts to restore state we had upon entering
-	 * this function
+	 * lock context and mask interrupts again
+	 * We save flags into a dummy because we may have
+	 * altered interrupts mask compared to entry in this
+	 * function.
 	 */
-	local_irq_disable();
-
-	PROTECT_CTX(ctx, flags);
+	PROTECT_CTX(ctx, dummy_flags);
 
 	/*
 	 * we need to read the ovfl_regs only after wake-up
@@ -5095,7 +5090,9 @@ skip_blocking:
 	ctx->ctx_ovfl_regs[0] = 0UL;
 
 nothing_to_do:
-
+	/*
+	 * restore flags as they were upon entry
+	 */
 	UNPROTECT_CTX(ctx, flags);
 }
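The pfm_handle_work() change above captures a common pattern: when a function deliberately alters the interrupt mask in the middle (here, to let down_interruptible() sleep), re-acquiring the context lock must save the flags into a throwaway variable, so that the final unlock still restores the state from entry. A minimal user-space sketch of the idea, with a plain int standing in for the saved interrupt mask and protect()/unprotect() standing in for PROTECT_CTX()/UNPROTECT_CTX() (all names illustrative, not the kernel API):

#include <stdio.h>

static int irqs_enabled;		/* pretend CPU interrupt state */

static void protect(int *saved)  { *saved = irqs_enabled; irqs_enabled = 0; }
static void unprotect(int saved) { irqs_enabled = saved; }

static void handle_work(void)
{
	int flags, dummy_flags;

	protect(&flags);		/* entry state captured exactly once */

	unprotect(flags);		/* drop the "lock" ... */
	irqs_enabled = 1;		/* ... and force-enable to sleep safely */
	/* the sleeping call would go here */

	protect(&dummy_flags);		/* re-lock into a dummy: 'flags' must
					 * keep the state from entry */

	unprotect(flags);		/* leave with the entry state intact */
	printf("irqs_enabled on exit: %d\n", irqs_enabled);
}

int main(void) { irqs_enabled = 0; handle_work(); return 0; }

Saving into flags a second time would silently clobber the caller's state, which is exactly the bug the dummy_flags variable avoids.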
@@ -20,24 +20,17 @@ MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
 MODULE_DESCRIPTION("perfmon default sampling format");
 MODULE_LICENSE("GPL");
 
-MODULE_PARM(debug, "i");
-MODULE_PARM_DESC(debug, "debug");
-
-MODULE_PARM(debug_ovfl, "i");
-MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
-
-
 #define DEFAULT_DEBUG 1
 
 #ifdef DEFAULT_DEBUG
 #define DPRINT(a) \
 	do { \
-		if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #else
@@ -45,8 +38,6 @@ MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
 #define DPRINT_ovfl(a)
 #endif
 
-static int debug, debug_ovfl;
-
 static int
 default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
 {
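The change above drops the sampling module's private debug module parameters and gates its DPRINT macros on the now-exported pfm_sysctl instead, so one sysctl knob controls both the perfmon core and the sampling format, and overflow messages additionally require the master switch. A stand-alone sketch of such a centrally gated debug macro (names hypothetical):

#include <stdio.h>

/* one shared control block instead of per-module switches */
struct dbg_sysctl {
	int debug;
	int debug_ovfl;
};
static struct dbg_sysctl g_sysctl = { .debug = 1, .debug_ovfl = 0 };

/* every message is tagged with its origin, like DPRINT above */
#define DPRINT(...) \
	do { \
		if (g_sysctl.debug > 0) { \
			fprintf(stderr, "%s.%d: ", __func__, __LINE__); \
			fprintf(stderr, __VA_ARGS__); \
		} \
	} while (0)

/* overflow messages additionally require the master switch */
#define DPRINT_OVFL(...) \
	do { \
		if (g_sysctl.debug > 0 && g_sysctl.debug_ovfl > 0) { \
			fprintf(stderr, "%s.%d: ", __func__, __LINE__); \
			fprintf(stderr, __VA_ARGS__); \
		} \
	} while (0)

int main(void)
{
	DPRINT("visible: debug=%d\n", g_sysctl.debug);
	DPRINT_OVFL("suppressed: debug_ovfl=%d\n", g_sysctl.debug_ovfl);
	return 0;
}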
@@ -4,10 +4,15 @@
  * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *	Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
+ * Copyright (C) 2000, 2004 Intel Corp
+ *	Rohit Seth <rohit.seth@intel.com>
+ *	Suresh Siddha <suresh.b.siddha@intel.com>
+ *	Gordon Jin <gordon.jin@intel.com>
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  *
+ * 12/26/04 S.Siddha, G.Jin, R.Seth
+ *			Add multi-threading and multi-core detection
  * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
  * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
  * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
@@ -296,6 +301,34 @@ mark_bsp_online (void)
 #endif
 }
 
+#ifdef CONFIG_SMP
+static void
+check_for_logical_procs (void)
+{
+	pal_logical_to_physical_t info;
+	s64 status;
+
+	status = ia64_pal_logical_to_phys(0, &info);
+	if (status == -1) {
+		printk(KERN_INFO "No logical to physical processor mapping "
+		       "available\n");
+		return;
+	}
+	if (status) {
+		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+		       status);
+		return;
+	}
+	/*
+	 * Total number of siblings that BSP has.  Though not all of them
+	 * may have booted successfully. The correct number of siblings
+	 * booted is in info.overview_num_log.
+	 */
+	smp_num_siblings = info.overview_tpc;
+	smp_num_cpucores = info.overview_cpp;
+}
+#endif
+
 void __init
 setup_arch (char **cmdline_p)
 {
@@ -356,6 +389,19 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
+
+	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, cpu_core_map[0]);
+
+	check_for_logical_procs();
+	if (smp_num_cpucores > 1)
+		printk(KERN_INFO
+		       "cpu package is Multi-Core capable: number of cores=%d\n",
+		       smp_num_cpucores);
+	if (smp_num_siblings > 1)
+		printk(KERN_INFO
+		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
+		       smp_num_siblings);
 #endif
 
 	cpu_init();	/* initialize the bootstrap CPU */
@@ -459,12 +505,23 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   "cpu regs   : %u\n"
 		   "cpu MHz    : %lu.%06lu\n"
 		   "itc MHz    : %lu.%06lu\n"
-		   "BogoMIPS   : %lu.%02lu\n\n",
+		   "BogoMIPS   : %lu.%02lu\n",
 		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
 		   features, c->ppn, c->number,
 		   c->proc_freq / 1000000, c->proc_freq % 1000000,
 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
+#ifdef CONFIG_SMP
+	seq_printf(m, "siblings   : %u\n", c->num_log);
+	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
+		seq_printf(m,
+			   "physical id: %u\n"
+			   "core id    : %u\n"
+			   "thread id  : %u\n",
+			   c->socket_id, c->core_id, c->thread_id);
+#endif
+	seq_printf(m,"\n");
 
 	return 0;
 }
@@ -533,6 +590,14 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	memcpy(c->vendor, cpuid.field.vendor, 16);
 #ifdef CONFIG_SMP
 	c->cpu = smp_processor_id();
+
+	/* below default values will be overwritten by identify_siblings()
+	 * for Multi-Threading/Multi-Core capable cpu's
+	 */
+	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
+	c->socket_id = -1;
+
+	identify_siblings(c);
 #endif
 	c->ppn = cpuid.field.ppn;
 	c->number = cpuid.field.number;
+213 -4
@@ -1,8 +1,13 @@
 /*
  * SMP boot-related support
  *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2001, 2004-2005 Intel Corp
+ *	Rohit Seth <rohit.seth@intel.com>
+ *	Suresh Siddha <suresh.b.siddha@intel.com>
+ *	Gordon Jin <gordon.jin@intel.com>
+ *	Ashok Raj  <ashok.raj@intel.com>
  *
  * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
  * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
@@ -10,6 +15,11 @@
  *						smp_boot_cpus()/smp_commence() is replaced by
  *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
  * 04/06/21 Ashok Raj		<ashok.raj@intel.com> Added CPU Hotplug Support
+ * 04/12/26 Jin Gordon <gordon.jin@intel.com>
+ * 04/12/26 Rohit Seth <rohit.seth@intel.com>
+ *						Add multi-threading and multi-core detection
+ * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
+ *						Setup cpu_sibling_map and cpu_core_map
  */
 #include <linux/config.h>
@@ -122,6 +132,11 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+int smp_num_siblings = 1;
+int smp_num_cpucores = 1;
+
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
@@ -156,7 +171,8 @@ sync_master (void *arg)
 	local_irq_save(flags);
 	{
 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
-			while (!go[MASTER]);
+			while (!go[MASTER])
+				cpu_relax();
 			go[MASTER] = 0;
 			go[SLAVE] = ia64_get_itc();
 		}
@@ -179,7 +195,8 @@ get_delta (long *rt, long *master)
 	for (i = 0; i < NUM_ITERS; ++i) {
 		t0 = ia64_get_itc();
 		go[MASTER] = 1;
-		while (!(tm = go[SLAVE]));
+		while (!(tm = go[SLAVE]))
+			cpu_relax();
 		go[SLAVE] = 0;
 		t1 = ia64_get_itc();
@@ -258,7 +275,8 @@ ia64_sync_itc (unsigned int master)
 		return;
 	}
 
-	while (go[MASTER]);	/* wait for master to be ready */
+	while (go[MASTER])
+		cpu_relax();	/* wait for master to be ready */
 
 	spin_lock_irqsave(&itc_sync_lock, flags);
 	{
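The three hunks above rewrite bare "while (...);" busy-waits to spin through cpu_relax(), which tells the processor it is in a polling loop (a pause hint on most architectures) and lowers power and pipeline pressure while waiting on another CPU. A user-space sketch of the same idiom, with a pause builtin standing in for the kernel's cpu_relax() (hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int go;			/* flag set by another thread */

static inline void cpu_relax_hint(void)
{
#if defined(__x86_64__)
	__builtin_ia32_pause();		/* x86 'pause'; ia64 used a hint insn */
#else
	atomic_signal_fence(memory_order_seq_cst);	/* at least a barrier */
#endif
}

void wait_for_go(void)
{
	while (!atomic_load_explicit(&go, memory_order_acquire))
		cpu_relax_hint();	/* polite spin instead of a hot loop */
	printf("released\n");
}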
@@ -595,7 +613,68 @@ void __devinit smp_prepare_boot_cpu(void)
 	cpu_set(smp_processor_id(), cpu_callin_map);
 }
 
+/*
+ * mt_info[] is a temporary store for all info returned by
+ * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
+ * specific cpu comes.
+ */
+static struct {
+	__u32   socket_id;
+	__u16   core_id;
+	__u16   thread_id;
+	__u16   proc_fixed_addr;
+	__u8    valid;
+}mt_info[NR_CPUS] __devinit;
+
 #ifdef CONFIG_HOTPLUG_CPU
+static inline void
+remove_from_mtinfo(int cpu)
+{
+	int i;
+
+	for_each_cpu(i)
+		if (mt_info[i].valid && mt_info[i].socket_id ==
+		    cpu_data(cpu)->socket_id)
+			mt_info[i].valid = 0;
+}
+
+static inline void
+clear_cpu_sibling_map(int cpu)
+{
+	int i;
+
+	for_each_cpu_mask(i, cpu_sibling_map[cpu])
+		cpu_clear(cpu, cpu_sibling_map[i]);
+	for_each_cpu_mask(i, cpu_core_map[cpu])
+		cpu_clear(cpu, cpu_core_map[i]);
+
+	cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+}
+
+static void
+remove_siblinginfo(int cpu)
+{
+	int last = 0;
+
+	if (cpu_data(cpu)->threads_per_core == 1 &&
+	    cpu_data(cpu)->cores_per_socket == 1) {
+		cpu_clear(cpu, cpu_core_map[cpu]);
+		cpu_clear(cpu, cpu_sibling_map[cpu]);
+		return;
+	}
+
+	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+
+	/* remove it from all sibling map's */
+	clear_cpu_sibling_map(cpu);
+
+	/* if this cpu is the last in the core group, remove all its info
+	 * from mt_info structure
+	 */
+	if (last)
+		remove_from_mtinfo(cpu);
+}
+
 extern void fixup_irqs(void);
 /* must be called with cpucontrol mutex held */
 int __cpu_disable(void)
@@ -608,6 +687,7 @@ int __cpu_disable(void)
 	if (cpu == 0)
 		return -EBUSY;
 
+	remove_siblinginfo(cpu);
 	fixup_irqs();
 	local_flush_tlb_all();
 	cpu_clear(cpu, cpu_callin_map);
@@ -660,6 +740,23 @@ smp_cpus_done (unsigned int dummy)
 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
 }
 
+static inline void __devinit
+set_cpu_sibling_map(int cpu)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
+			cpu_set(i, cpu_core_map[cpu]);
+			cpu_set(cpu, cpu_core_map[i]);
+			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
+				cpu_set(i, cpu_sibling_map[cpu]);
+				cpu_set(cpu, cpu_sibling_map[i]);
+			}
+		}
+	}
+}
+
 int __devinit
 __cpu_up (unsigned int cpu)
 {
@@ -682,6 +779,15 @@ __cpu_up (unsigned int cpu)
 	if (ret < 0)
 		return ret;
 
+	if (cpu_data(cpu)->threads_per_core == 1 &&
+	    cpu_data(cpu)->cores_per_socket == 1) {
+		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, cpu_core_map[cpu]);
+		return 0;
+	}
+
+	set_cpu_sibling_map(cpu);
+
 	return 0;
 }
@@ -709,3 +815,106 @@ init_smp_config(void)
 		ia64_sal_strerror(sal_ret));
 }
 
+static inline int __devinit
+check_for_mtinfo_index(void)
+{
+	int i;
+
+	for_each_cpu(i)
+		if (!mt_info[i].valid)
+			return i;
+
+	return -1;
+}
+
+/*
+ * Search the mt_info to find out if this socket's cid/tid information is
+ * cached or not. If the socket exists, fill in the core_id and thread_id
+ * in cpuinfo
+ */
+static int __devinit
+check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
+{
+	int i;
+	__u32 sid = c->socket_id;
+
+	for_each_cpu(i) {
+		if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
+		    && mt_info[i].socket_id == sid) {
+			c->core_id = mt_info[i].core_id;
+			c->thread_id = mt_info[i].thread_id;
+			return 1; /* not a new socket */
+		}
+	}
+	return 0;
+}
+
+/*
+ * identify_siblings(cpu) gets called from identify_cpu. This populates the
+ * information related to logical execution units in per_cpu_data structure.
+ */
+void __devinit
+identify_siblings(struct cpuinfo_ia64 *c)
+{
+	s64 status;
+	u16 pltid;
+	u64 proc_fixed_addr;
+	int count, i;
+	pal_logical_to_physical_t info;
+
+	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
+		return;
+
+	if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
+		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+		       status);
+		return;
+	}
+	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
+		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
+		return;
+	}
+	if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
+		printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
+		return;
+	}
+
+	c->socket_id = (pltid << 8) | info.overview_ppid;
+	c->cores_per_socket = info.overview_cpp;
+	c->threads_per_core = info.overview_tpc;
+	count = c->num_log = info.overview_num_log;
+
+	/* If the thread and core id information is already cached, then
+	 * we will simply update cpu_info and return. Otherwise, we will
+	 * do the PAL calls and cache core and thread id's of all the siblings.
+	 */
+	if (check_for_new_socket(proc_fixed_addr, c))
+		return;
+
+	for (i = 0; i < count; i++) {
+		int index;
+
+		if (i && (status = ia64_pal_logical_to_phys(i, &info))
+			  != PAL_STATUS_SUCCESS) {
+			printk(KERN_ERR "ia64_pal_logical_to_phys failed"
+			       " with %ld\n", status);
+			return;
+		}
+		if (info.log2_la == proc_fixed_addr) {
+			c->core_id = info.log1_cid;
+			c->thread_id = info.log1_tid;
+		}
+
+		index = check_for_mtinfo_index();
+		/* We will not do the mt_info caching optimization in this case.
+		 */
+		if (index < 0)
+			continue;
+
+		mt_info[index].valid = 1;
+		mt_info[index].socket_id = c->socket_id;
+		mt_info[index].core_id = info.log1_cid;
+		mt_info[index].thread_id = info.log1_tid;
+		mt_info[index].proc_fixed_addr = info.log2_la;
+	}
+}
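set_cpu_sibling_map() above marks, for each online CPU sharing the new CPU's socket, membership in the core map, and for each also sharing its core, membership in the sibling map, always updating both directions so the masks stay symmetric. A small user-space sketch of building such symmetric bitmasks (hypothetical types and topology data):

#include <stdint.h>
#include <stdio.h>

#define NCPUS 8
struct cpu_topo { int socket_id, core_id; };

static struct cpu_topo topo[NCPUS] = {
	{0,0},{0,0},{0,1},{0,1},{1,0},{1,0},{1,1},{1,1}
};
static uint32_t core_map[NCPUS];	/* CPUs in the same socket */
static uint32_t sibling_map[NCPUS];	/* CPUs in the same core */

static void set_cpu_sibling_map(int cpu)
{
	for (int i = 0; i <= cpu; i++) {	/* CPUs "online" so far */
		if (topo[cpu].socket_id == topo[i].socket_id) {
			core_map[cpu] |= 1u << i;	/* symmetric updates */
			core_map[i]   |= 1u << cpu;
			if (topo[cpu].core_id == topo[i].core_id) {
				sibling_map[cpu] |= 1u << i;
				sibling_map[i]   |= 1u << cpu;
			}
		}
	}
}

int main(void)
{
	for (int c = 0; c < NCPUS; c++)
		set_cpu_sibling_map(c);
	for (int c = 0; c < NCPUS; c++)
		printf("cpu%d core_map=%02x sibling_map=%02x\n",
		       c, core_map[c], sibling_map[c]);
	return 0;
}

The hot-unplug path (remove_siblinginfo/clear_cpu_sibling_map above) is the exact inverse: clear this CPU's bit from every mask it appears in, then empty its own masks.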
+18 -11
@@ -1943,23 +1943,30 @@ EXPORT_SYMBOL(unw_unwind);
 int
 unw_unwind_to_user (struct unw_frame_info *info)
 {
-	unsigned long ip, sp;
+	unsigned long ip, sp, pr = 0;
 
 	while (unw_unwind(info) >= 0) {
-		if (unw_get_rp(info, &ip) < 0) {
-			unw_get_ip(info, &ip);
-			UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
-				   __FUNCTION__, ip);
-			return -1;
-		}
-		unw_get_sp(info, &sp);
-		if (sp >= (unsigned long)info->task + IA64_STK_OFFSET)
+		unw_get_sp(info, &sp);
+		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
+		    < IA64_PT_REGS_SIZE) {
+			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
+				   __FUNCTION__);
 			break;
-		if (ip < FIXADDR_USER_END)
+		}
+		if (unw_is_intr_frame(info) &&
+		    (pr & (1UL << PRED_USER_STACK)))
 			return 0;
+		if (unw_get_pr (info, &pr) < 0) {
+			unw_get_rp(info, &ip);
+			UNW_DPRINT(0, "unwind.%s: failed to read "
+				   "predicate register (ip=0x%lx)\n",
+				   __FUNCTION__, ip);
+			return -1;
+		}
 	}
 	unw_get_ip(info, &ip);
-	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
+	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
+		   __FUNCTION__, ip);
 	return -1;
 }
 EXPORT_SYMBOL(unw_unwind_to_user);
@@ -300,7 +300,7 @@ EK(.ex_handler,  (p[D])	st8 [dst1] = t15, 4*8)
 	add	src_pre_mem=0,src0	// prefetch src pointer
 	add	dst_pre_mem=0,dst0	// prefetch dest pointer
 	and	src0=-8,src0		// 1st src pointer
-(p7)	mov	ar.lc = r21
+(p7)	mov	ar.lc = cnt
 (p8)	mov	ar.lc = r0
 	;;
 	TEXT_ALIGN(32)

@@ -61,7 +61,8 @@ show_mem (void)
 	printk("%d reserved pages\n", reserved);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
-	printk("%ld pages in page table cache\n", pgtable_cache_size);
+	printk("%ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 }
 
 /* physical address where the bootmem map is located */

@@ -582,7 +582,8 @@ void show_mem(void)
 	printk("%d reserved pages\n", total_reserved);
 	printk("%d pages shared\n", total_shared);
 	printk("%d pages swap cached\n", total_cached);
-	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
+	printk("Total of %ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 	printk("%d free buffer pages\n", nr_free_buffer_pages());
 }
@@ -209,10 +209,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	}
 
   no_context:
-	if (isr & IA64_ISR_SP) {
+	if ((isr & IA64_ISR_SP)
+	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
+	{
 		/*
-		 * This fault was due to a speculative load set the "ed" bit in the psr to
-		 * ensure forward progress (target register will get a NaT).
+		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
+		 * bit in the psr to ensure forward progress.  (Target register will get a
+		 * NaT for ld.s, lfetch will be canceled.)
 		 */
 		ia64_psr(regs)->ed = 1;
 		return;
+49 -29
@@ -39,6 +39,9 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
+DEFINE_PER_CPU(long, __pgtable_quicklist_size);
+
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -50,27 +53,53 @@ struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
 
-static int pgt_cache_water[2] = { 25, 50 };
-
-struct page *zero_page_memmap_ptr;		/* map entry for zero page */
+struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
+#define MIN_PGT_PAGES			25UL
+#define MAX_PGT_FREES_PER_PASS		16L
+#define PGT_FRACTION_OF_NODE_MEM	16
+
+static inline long
+max_pgt_pages(void)
+{
+	u64 node_free_pages, max_pgt_pages;
+
+#ifndef	CONFIG_NUMA
+	node_free_pages = nr_free_pages();
+#else
+	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+#endif
+	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
+	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
+	return max_pgt_pages;
+}
+
+static inline long
+min_pages_to_free(void)
+{
+	long pages_to_free;
+
+	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
+	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
+	return pages_to_free;
+}
+
 void
-check_pgt_cache (void)
+check_pgt_cache(void)
 {
-	int low, high;
+	long pages_to_free;
 
-	low = pgt_cache_water[0];
-	high = pgt_cache_water[1];
+	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
+		return;
 
 	preempt_disable();
-	if (pgtable_cache_size > (u64) high) {
-		do {
-			if (pgd_quicklist)
-				free_page((unsigned long)pgd_alloc_one_fast(NULL));
-			if (pmd_quicklist)
-				free_page((unsigned long)pmd_alloc_one_fast(NULL, 0));
-		} while (pgtable_cache_size > (u64) low);
+	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
+		while (pages_to_free--) {
+			free_page((unsigned long)pgtable_quicklist_alloc());
+		}
+		preempt_enable();
+		preempt_disable();
 	}
 	preempt_enable();
 }
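The new check_pgt_cache() above trims the per-CPU page-table quicklist in bounded passes of at most MAX_PGT_FREES_PER_PASS pages, briefly re-enabling preemption between passes so a large trim cannot monopolize the CPU. A user-space sketch of that bounded-trim loop (hypothetical names, a plain counter standing in for the cached page list):

#include <stdio.h>

#define MIN_PGT_PAGES		25L
#define MAX_FREES_PER_PASS	16L

static long quicklist_size = 100;	/* cached pages, stand-in */

static long target_size(void) { return MIN_PGT_PAGES; /* would scale with free memory */ }

static long pages_to_free(void)
{
	long n = quicklist_size - target_size();
	return n < MAX_FREES_PER_PASS ? n : MAX_FREES_PER_PASS;
}

static void check_pgt_cache(void)
{
	long n;

	if (quicklist_size <= MIN_PGT_PAGES)
		return;

	/* preempt_disable() would go here */
	while ((n = pages_to_free()) > 0) {
		while (n--)
			quicklist_size--;	/* free_page(...) in the kernel */
		/* preempt_enable(); preempt_disable();  breathe between passes */
		printf("pass done, %ld pages cached\n", quicklist_size);
	}
	/* preempt_enable() would go here */
}

int main(void) { check_pgt_cache(); return 0; }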
@@ -523,11 +552,14 @@ void
 mem_init (void)
 {
 	long reserved_pages, codesize, datasize, initsize;
-	unsigned long num_pgt_pages;
 	pg_data_t *pgdat;
 	int i;
 	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
 
+	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
+	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
+	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
+
 #ifdef CONFIG_PCI
 	/*
 	 * This needs to be called _after_ the command line has been parsed but _before_
@@ -564,18 +596,6 @@ mem_init (void)
 	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
 	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
 
-	/*
-	 * Allow for enough (cached) page table pages so that we can map the entire memory
-	 * at least once.  Each task also needs a couple of page tables pages, so add in a
-	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
-	 * Don't allow the cache to be more than 10% of total memory, though.
-	 */
-#	define NUM_TASKS	500	/* typical number of tasks */
-	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
-	if (num_pgt_pages > nr_free_pages() / 10)
-		num_pgt_pages = nr_free_pages() / 10;
-	if (num_pgt_pages > (u64) pgt_cache_water[1])
-		pgt_cache_water[1] = num_pgt_pages;
-
 	/*
 	 * For fsyscall entrpoints with no light-weight handler, use the ordinary
@@ -123,9 +123,11 @@ pcibr_lock(struct pcibus_info *pcibus_info)
 }
 #define pcibr_unlock(pcibus_info, flag)  spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
 
+extern int  pcibr_init_provider(void);
 extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
-extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
-extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
 
 /*
  * prototypes for the bridge asic register access routines in pcibr_reg.c
@@ -10,3 +10,4 @@
 obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
 				   huberror.o io_init.o iomv.o klconflib.o sn2/
 obj-$(CONFIG_IA64_GENERIC)      += machvec.o
+obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 #include <linux/config.h>
@@ -170,10 +170,6 @@ retry_bteop:
 	/* Initialize the notification to a known value. */
 	*bte->most_rcnt_na = BTE_WORD_BUSY;
 
-	/* Set the status reg busy bit and transfer length */
-	BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
-	BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
-
 	/* Set the source and destination registers */
 	BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
 	BTE_SRC_STORE(bte, TO_PHYS(src));
@@ -188,7 +184,7 @@ retry_bteop:
 
 	/* Initiate the transfer */
 	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
-	BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
+	BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));
 
 	itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
 
@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
 	mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
 
 	for (i = 0; i < BTES_PER_NODE; i++) {
+		u64 *base_addr;
+
 		/* Which link status register should we use? */
-		unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
-		mynodepda->bte_if[i].bte_base_addr = (u64 *)
-		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
+		base_addr = (u64 *)
+		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
+		mynodepda->bte_if[i].bte_base_addr = base_addr;
+		mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);
 
 		/*
 		 * Initialize the notification and spinlock
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 */
 
 #include <linux/types.h>
@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long);
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
-void bte_error_handler(unsigned long _nodepda)
+void shub1_bte_error_handler(unsigned long _nodepda)
 {
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
-	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
 	nasid_t nasid;
 	int i;
 	int valid_crbs;
-	unsigned long irq_flags;
-	volatile u64 *notify;
-	bte_result_t bh_error;
 	ii_imem_u_t imem;	/* II IMEM Register */
 	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
 	ii_ibcr_u_t ibcr;
 	ii_icmr_u_t icmr;
 	ii_ieclr_u_t ieclr;
 
-	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+	BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
		    smp_processor_id()));
 
-	spin_lock_irqsave(recovery_lock, irq_flags);
-
 	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
 		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
			    smp_processor_id()));
-		spin_unlock_irqrestore(recovery_lock, irq_flags);
 		return;
 	}
-	/*
-	 * Lock all interfaces on this node to prevent new transfers
-	 * from being queued.
-	 */
-	for (i = 0; i < BTES_PER_NODE; i++) {
-		if (err_nodepda->bte_if[i].cleanup_active) {
-			continue;
-		}
-		spin_lock(&err_nodepda->bte_if[i].spinlock);
-		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
-			    smp_processor_id(), i));
-		err_nodepda->bte_if[i].cleanup_active = 1;
-	}
 
 	/* Determine information about our hub */
 	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda)
 		mod_timer(recovery_timer, HZ * 5);
 		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
			    smp_processor_id()));
-		spin_unlock_irqrestore(recovery_lock, irq_flags);
 		return;
 	}
 	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda)
 				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
					    err_nodepda, smp_processor_id(),
					    i));
-				spin_unlock_irqrestore(recovery_lock,
-						       irq_flags);
 				return;
 			}
 		}
@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda)
 	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
 	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
 
+	del_timer(recovery_timer);
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+	int i;
+	nasid_t nasid;
+	unsigned long irq_flags;
+	volatile u64 *notify;
+	bte_result_t bh_error;
+
+	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+		    smp_processor_id()));
+
+	spin_lock_irqsave(recovery_lock, irq_flags);
+
+	/*
+	 * Lock all interfaces on this node to prevent new transfers
+	 * from being queued.
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		if (err_nodepda->bte_if[i].cleanup_active) {
+			continue;
+		}
+		spin_lock(&err_nodepda->bte_if[i].spinlock);
+		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		err_nodepda->bte_if[i].cleanup_active = 1;
+	}
+
+	if (is_shub1()) {
+		shub1_bte_error_handler(_nodepda);
+	} else {
+		nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+		if (ia64_sn_bte_recovery(nasid))
+			panic("bte_error_handler(): Fatal BTE Error");
+	}
+
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		bh_error = err_nodepda->bte_if[i].bh_error;
+		if (bh_error != BTE_SUCCESS) {
@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda)
 		spin_unlock(&err_nodepda->bte_if[i].spinlock);
 	}
 
-	del_timer(recovery_timer);
-
 	spin_unlock_irqrestore(recovery_lock, irq_flags);
 }
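The restructuring above turns bte_error_handler() into a generic entry point that takes the recovery locks and then dispatches: on shub1 hubs it runs the register-level recovery in shub1_bte_error_handler(), while newer hubs delegate to a firmware recovery call and panic if that fails. A compact sketch of that hardware-revision dispatch shape (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool is_shub1(void) { return true; }	/* hardware probe stand-in */

static void shub1_error_handler(void) { puts("shub1: reset interfaces directly"); }
static int  firmware_recovery(void)   { puts("shub2: ask firmware"); return 0; }

void error_handler(void)
{
	/* generic part: serialize against new transfers (locks would go here) */

	if (is_shub1())
		shub1_error_handler();	/* revision-specific register dance */
	else if (firmware_recovery())
		fprintf(stderr, "fatal BTE error\n");	/* no fallback on newer hubs */

	/* generic part: re-enable interfaces, drop locks */
}

int main(void) { error_handler(); return 0; }

Keeping the locking and the post-recovery cleanup in the generic function means each revision-specific handler only contains what actually differs between chips.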
Some files were not shown because too many files have changed in this diff.