Merge branch 'cpumask-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

Conflicts:
	arch/x86/include/asm/topology.h
	drivers/oprofile/buffer_sync.c
(Both cases: changed in Linus' tree, removed in Ingo's.)
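For context, the cpumask-for-linus work replaces value-based cpumask_t operations with pointer-based struct cpumask operations throughout arch/x86. A minimal sketch of the two styles, using made-up helper names purely for illustration (this code is not part of the commit):

#include <linux/cpumask.h>

/* Old style: the whole NR_CPUS-bit map is copied onto the stack. */
static int first_online_cpu_old(void)
{
	cpumask_t mask = cpu_online_map;
	return first_cpu(mask);
}

/* New style: only a const struct cpumask pointer is passed around. */
static int first_online_cpu_new(void)
{
	const struct cpumask *mask = cpu_online_mask;
	return cpumask_first(mask);
}

The same renaming runs through the hunks below: cpus_* operators become cpumask_*, &cpu_online_map becomes cpu_online_mask, and cpumask_of_cpu(cpu) becomes cpumask_of(cpu).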
@@ -8,6 +8,7 @@
#define _ASM_MMZONE_H_
#ifdef __KERNEL__

#include <linux/cpumask.h>

/*
 * generic non-linear memory support:

@@ -3,6 +3,8 @@

#ifdef CONFIG_NEED_MULTIPLE_NODES

#include <linux/cpumask.h>

extern struct pglist_data *node_data[];

#define NODE_DATA(nid) (node_data[nid])

@@ -3,8 +3,6 @@
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)

static inline void setup_cpu_local_masks(void) { }

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */

@@ -109,11 +109,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
return sd->node;
}

static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
{
return node_to_cpumask(__pcibus_to_node(bus));
}

static inline const struct cpumask *
cpumask_of_pcibus(const struct pci_bus *bus)
{

@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
/* cpus sharing the last level cache: */
cpumask_t llc_shared_map;
cpumask_var_t llc_shared_map;
#endif
/* cpuid returned max cores value: */
u16 x86_max_cores;
@@ -736,6 +736,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);

extern unsigned long boot_option_idle_override;
extern unsigned long idle_halt;

@@ -21,19 +21,19 @@
extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
return &per_cpu(cpu_sibling_map, cpu);
return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
return &per_cpu(cpu_core_map, cpu);
return per_cpu(cpu_core_map, cpu);
}

DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu)
smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi(cpumask_t mask)
#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_ops.send_call_func_ipi(&mask);
smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);

@@ -44,9 +44,6 @@

#ifdef CONFIG_X86_32

/* Mappings between node number and cpus on that node. */
extern cpumask_t node_to_cpumask_map[];

/* Mappings between logical cpu number and node number */
extern int cpu_to_node_map[];

@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu)
}
#define early_cpu_to_node(cpu) cpu_to_node(cpu)

/* Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used. The
 * cpumask_of_node function should be used whenever possible.
 */
static inline cpumask_t node_to_cpumask(int node)
{
return node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
return &node_to_cpumask_map[node];
}

static inline void setup_node_to_cpumask_map(void) { }

#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
extern cpumask_t *node_to_cpumask_map;

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
extern int early_cpu_to_node(int cpu);
extern const cpumask_t *cpumask_of_node(int node);
extern cpumask_t node_to_cpumask(int node);

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

@@ -108,34 +81,25 @@ static inline int early_cpu_to_node(int cpu)
return early_per_cpu(x86_cpu_to_node_map, cpu);
}

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const cpumask_t *cpumask_of_node(int node)
{
return &node_to_cpumask_map[node];
}
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Returns a bitmask of CPUs on Node 'node'. */
static inline cpumask_t node_to_cpumask(int node)
#endif /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern const struct cpumask *cpumask_of_node(int node);
#else
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
return node_to_cpumask_map[node];
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif

extern void setup_node_to_cpumask_map(void);

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node) \
const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node) \
v = cpumask_of_node(node)

#endif /* CONFIG_X86_64 */

/*
 * Returns the number of the node containing Node 'node'. This
 * architecture is flat, so it is a pretty simple function!
@@ -143,7 +107,6 @@ extern void setup_node_to_cpumask_map(void);
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)

#ifdef CONFIG_X86_32
extern unsigned long node_start_pfn[];

@@ -209,40 +172,24 @@ static inline int early_cpu_to_node(int cpu)
return 0;
}

static inline const cpumask_t *cpumask_of_node(int node)
static inline const struct cpumask *cpumask_of_node(int node)
{
return &cpu_online_map;
}
static inline cpumask_t node_to_cpumask(int node)
{
return cpu_online_map;
return cpu_online_mask;
}

static inline void setup_node_to_cpumask_map(void) { }

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node) \
const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node) \
v = cpumask_of_node(node)
#endif

#include <asm-generic/topology.h>

extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))

/* indicates that pointers to the topology cpumask_t maps are valid */
#define arch_provides_topology_pointers yes
@@ -256,7 +203,7 @@ struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);

#ifdef CONFIG_SMP
#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
#define mc_capable() (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
#define smt_capable() (smp_num_siblings > 1)
#endif

@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void)
return 1;
}

static const cpumask_t *bigsmp_target_cpus(void)
static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
return &cpu_online_map;
return cpu_online_mask;
#else
return &cpumask_of_cpu(0);
return cpumask_of(0);
#endif
}

@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
}

/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
{ } /* NULL entry stops DMI scanning */
};

static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
cpus_clear(*retmask);
cpu_set(cpu, *retmask);
cpumask_clear(retmask);
cpumask_set_cpu(cpu, retmask);
}

static int probe_bigsmp(void)

@@ -410,7 +410,7 @@ static void es7000_enable_apic_mode(void)
WARN(1, "Command failed, status = %x\n", mip_status);
}

static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
 * specified in the interrupt destination when using lowest
@@ -420,7 +420,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
 * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt desitination.
 */
*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

@@ -455,14 +456,14 @@ static int es7000_apic_id_registered(void)
return 1;
}

static const cpumask_t *target_cpus_cluster(void)
static const struct cpumask *target_cpus_cluster(void)
{
return &CPU_MASK_ALL;
return cpu_all_mask;
}

static const cpumask_t *es7000_target_cpus(void)
static const struct cpumask *es7000_target_cpus(void)
{
return &cpumask_of_cpu(smp_processor_id());
return cpumask_of(smp_processor_id());
}

static unsigned long
@@ -517,7 +518,7 @@ static void es7000_setup_apic_routing(void)
"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster",
nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
}

static int es7000_apicid_to_node(int logical_apicid)
@@ -572,7 +573,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
return 1;
}

static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
unsigned int round = 0;
int cpu, uninitialized_var(apicid);

@@ -39,7 +39,7 @@
int unknown_nmi_panic;
int nmi_watchdog_enabled;

static cpumask_t backtrace_mask = CPU_MASK_NONE;
static cpumask_var_t backtrace_mask;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void)
if (!prev_nmi_count)
goto error;

alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
touched = 1;
}

if (cpu_isset(cpu, backtrace_mask)) {
if (cpumask_test_cpu(cpu, backtrace_mask)) {
static DEFINE_SPINLOCK(lock); /* Serialise the printks */

spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
dump_stack();
spin_unlock(&lock);
cpu_clear(cpu, backtrace_mask);
cpumask_clear_cpu(cpu, backtrace_mask);
}

/* Could check oops_in_progress here too, but it's safer not to */
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void)
{
int i;

backtrace_mask = cpu_online_map;
cpumask_copy(backtrace_mask, cpu_online_mask);
/* Wait for up to 10 seconds for all CPUs to do the backtrace */
for (i = 0; i < 10 * 1000; i++) {
if (cpus_empty(backtrace_mask))
if (cpumask_empty(backtrace_mask))
break;
mdelay(1);
}

@@ -334,9 +334,9 @@ static inline void numaq_smp_callin_clear_local_apic(void)
clear_local_APIC();
}

static inline const cpumask_t *numaq_target_cpus(void)
static inline const struct cpumask *numaq_target_cpus(void)
{
return &CPU_MASK_ALL;
return cpu_all_mask;
}

static inline unsigned long
@@ -427,7 +427,7 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
 * We use physical apicids here, not logical, so just return the default
 * physical broadcast to stop people from breaking us
 */
static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return 0x0F;
}
@@ -462,7 +462,7 @@ static int probe_numaq(void)
return found_numaq;
}

static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
 * specified in the interrupt destination when using lowest
@@ -472,7 +472,8 @@ static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
 * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt desitination.
 */
*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

static void numaq_setup_portio_remap(void)

@@ -83,7 +83,8 @@ static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
 * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt desitination.
 */
*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/* should be called last. */

@@ -53,23 +53,19 @@ static unsigned summit_get_apic_id(unsigned long x)
return (x >> 24) & 0xFF;
}

static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector)
{
default_send_IPI_mask_sequence_logical(mask, vector);
}

static void summit_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);

if (!cpus_empty(mask))
summit_send_IPI_mask(&mask, vector);
default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
}

static void summit_send_IPI_all(int vector)
{
summit_send_IPI_mask(&cpu_online_map, vector);
summit_send_IPI_mask(cpu_online_mask, vector);
}

#include <asm/tsc.h>
@@ -186,13 +182,13 @@ static inline int is_WPEG(struct rio_detail *rio){

#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)

static const cpumask_t *summit_target_cpus(void)
static const struct cpumask *summit_target_cpus(void)
{
/* CPU_MASK_ALL (0xff) has undefined behaviour with
 * dest_LowestPrio mode logical clustered apic interrupt routing
 * Just start on cpu 0. IRQ balancing will spread load
 */
return &cpumask_of_cpu(0);
return cpumask_of(0);
}

static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
@@ -289,7 +285,7 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
return 1;
}

static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
unsigned int round = 0;
int cpu, apicid = 0;
@@ -346,7 +342,7 @@ static int probe_summit(void)
return 0;
}

static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
/* Careful. Some cpus do not strictly honor the set of cpus
 * specified in the interrupt destination when using lowest
@@ -356,7 +352,8 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
 * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt desitination.
 */
*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
cpumask_clear(retmask);
cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

#ifdef CONFIG_X86_SUMMIT_NUMA

@@ -466,7 +466,7 @@ static const lookup_t error_table[] = {
 * @err: APM BIOS return code
 *
 * Write a meaningful log entry to the kernel log in the event of
 * an APM error.
 * an APM error. Note that this also handles (negative) kernel errors.
 */

static void apm_error(char *str, int err)
@@ -478,42 +478,13 @@ static void apm_error(char *str, int err)
break;
if (i < ERROR_COUNT)
printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
else if (err < 0)
printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
else
printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
str, err);
}

/*
 * Lock APM functionality to physical CPU 0
 */

#ifdef CONFIG_SMP

static cpumask_t apm_save_cpus(void)
{
cpumask_t x = current->cpus_allowed;
/* Some bioses don't like being called from CPU != 0 */
set_cpus_allowed(current, cpumask_of_cpu(0));
BUG_ON(smp_processor_id() != 0);
return x;
}

static inline void apm_restore_cpus(cpumask_t mask)
{
set_cpus_allowed(current, mask);
}

#else

/*
 * No CPU lockdown needed on a uniprocessor
 */

#define apm_save_cpus() (current->cpus_allowed)
#define apm_restore_cpus(x) (void)(x)

#endif

/*
 * These are the actual BIOS calls. Depending on APM_ZERO_SEGS and
 * apm_info.allow_ints, we are being really paranoid here! Not only
@@ -568,16 +539,23 @@ static inline void apm_irq_restore(unsigned long flags)
# define APM_DO_RESTORE_SEGS
#endif

struct apm_bios_call {
u32 func;
/* In and out */
u32 ebx;
u32 ecx;
/* Out only */
u32 eax;
u32 edx;
u32 esi;

/* Error: -ENOMEM, or bits 8-15 of eax */
int err;
};

/**
 * apm_bios_call - Make an APM BIOS 32bit call
 * @func: APM function to execute
 * @ebx_in: EBX register for call entry
 * @ecx_in: ECX register for call entry
 * @eax: EAX register return
 * @ebx: EBX register return
 * @ecx: ECX register return
 * @edx: EDX register return
 * @esi: ESI register return
 * __apm_bios_call - Make an APM BIOS 32bit call
 * @_call: pointer to struct apm_bios_call.
 *
 * Make an APM call using the 32bit protected mode interface. The
 * caller is responsible for knowing if APM BIOS is configured and
@@ -586,35 +564,109 @@ static inline void apm_irq_restore(unsigned long flags)
 * flag is loaded into AL. If there is an error, then the error
 * code is returned in AH (bits 8-15 of eax) and this function
 * returns non-zero.
 *
 * Note: this makes the call on the current CPU.
 */

static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi)
static long __apm_bios_call(void *_call)
{
APM_DECL_SEGS
unsigned long flags;
cpumask_t cpus;
int cpu;
struct desc_struct save_desc_40;
struct desc_struct *gdt;

cpus = apm_save_cpus();
struct apm_bios_call *call = _call;

cpu = get_cpu();
BUG_ON(cpu != 0);
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;

apm_irq_save(flags);
APM_DO_SAVE_SEGS;
apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
apm_bios_call_asm(call->func, call->ebx, call->ecx,
&call->eax, &call->ebx, &call->ecx, &call->edx,
&call->esi);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
apm_restore_cpus(cpus);

return *eax & 0xff;
return call->eax & 0xff;
}

/* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */
static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
{
int ret;

/* Don't bother with work_on_cpu in the common case, so we don't
 * have to worry about OOM or overhead. */
if (get_cpu() == 0) {
ret = fn(call);
put_cpu();
} else {
put_cpu();
ret = work_on_cpu(0, fn, call);
}

/* work_on_cpu can fail with -ENOMEM */
if (ret < 0)
call->err = ret;
else
call->err = (call->eax >> 8) & 0xff;

return ret;
}

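The new on_cpu0() helper above pins a BIOS call to CPU 0: it calls the function directly when the caller already runs there, and otherwise hands it to work_on_cpu(). A hypothetical caller outside this commit would follow the same pattern (the probe function and its name are invented for illustration):

#include <linux/smp.h>
#include <linux/workqueue.h>

/* Illustrative only: some operation that must run on CPU 0. */
static long probe_firmware_feature(void *unused)
{
	return 0;	/* runs with the work item bound to CPU 0 */
}

static long run_probe_on_cpu0(void)
{
	long ret;

	if (get_cpu() == 0) {		/* already on CPU 0: call directly */
		ret = probe_firmware_feature(NULL);
		put_cpu();
	} else {			/* otherwise run it via work_on_cpu() */
		put_cpu();
		ret = work_on_cpu(0, probe_firmware_feature, NULL);
	}
	return ret;
}

Compared with the old apm_save_cpus()/set_cpus_allowed() dance, this avoids migrating the calling task, and (as the comment in the diff notes) work_on_cpu() can report -ENOMEM instead of failing silently.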
/**
 * apm_bios_call - Make an APM BIOS 32bit call (on CPU 0)
 * @call: the apm_bios_call registers.
 *
 * If there is an error, it is returned in @call.err.
 */
static int apm_bios_call(struct apm_bios_call *call)
{
return on_cpu0(__apm_bios_call, call);
}

/**
 * __apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
 * @_call: pointer to struct apm_bios_call.
 *
 * Make a BIOS call that returns one value only, or just status.
 * If there is an error, then the error code is returned in AH
 * (bits 8-15 of eax) and this function returns non-zero (it can
 * also return -ENOMEM). This is used for simpler BIOS operations.
 * This call may hold interrupts off for a long time on some laptops.
 *
 * Note: this makes the call on the current CPU.
 */
static long __apm_bios_call_simple(void *_call)
{
u8 error;
APM_DECL_SEGS
unsigned long flags;
int cpu;
struct desc_struct save_desc_40;
struct desc_struct *gdt;
struct apm_bios_call *call = _call;

cpu = get_cpu();
BUG_ON(cpu != 0);
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;

apm_irq_save(flags);
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
&call->eax);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
return error;
}

/**
@@ -623,40 +675,28 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 * @ebx_in: EBX register value for BIOS call
 * @ecx_in: ECX register value for BIOS call
 * @eax: EAX register on return from the BIOS call
 * @err: bits
 *
 * Make a BIOS call that returns one value only, or just status.
 * If there is an error, then the error code is returned in AH
 * (bits 8-15 of eax) and this function returns non-zero. This is
 * used for simpler BIOS operations. This call may hold interrupts
 * off for a long time on some laptops.
 * If there is an error, then the error code is returned in @err
 * and this function returns non-zero. This is used for simpler
 * BIOS operations. This call may hold interrupts off for a long
 * time on some laptops.
 */

static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax,
int *err)
{
u8 error;
APM_DECL_SEGS
unsigned long flags;
cpumask_t cpus;
int cpu;
struct desc_struct save_desc_40;
struct desc_struct *gdt;
struct apm_bios_call call;
int ret;

cpus = apm_save_cpus();
call.func = func;
call.ebx = ebx_in;
call.ecx = ecx_in;

cpu = get_cpu();
gdt = get_cpu_gdt_table(cpu);
save_desc_40 = gdt[0x40 / 8];
gdt[0x40 / 8] = bad_bios_desc;

apm_irq_save(flags);
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
APM_DO_RESTORE_SEGS;
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
apm_restore_cpus(cpus);
return error;
ret = on_cpu0(__apm_bios_call_simple, &call);
*eax = call.eax;
*err = call.err;
return ret;
}

/**
@@ -678,9 +718,10 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
static int apm_driver_version(u_short *val)
{
u32 eax;
int err;

if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
return (eax >> 8) & 0xff;
if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err))
return err;
*val = eax;
return APM_SUCCESS;
}
@@ -701,22 +742,21 @@ static int apm_driver_version(u_short *val)
 * that APM 1.2 is in use. If no messges are pending the value 0x80
 * is returned (No power management events pending).
 */

static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
{
u32 eax;
u32 ebx;
u32 ecx;
u32 dummy;
struct apm_bios_call call;

if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx,
&dummy, &dummy))
return (eax >> 8) & 0xff;
*event = ebx;
call.func = APM_FUNC_GET_EVENT;
call.ebx = call.ecx = 0;

if (apm_bios_call(&call))
return call.err;

*event = call.ebx;
if (apm_info.connection_version < 0x0102)
*info = ~0; /* indicate info not valid */
else
*info = ecx;
*info = call.ecx;
return APM_SUCCESS;
}

@@ -737,9 +777,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
static int set_power_state(u_short what, u_short state)
{
u32 eax;
int err;

if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax))
return (eax >> 8) & 0xff;
if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err))
return err;
return APM_SUCCESS;
}

@@ -770,6 +811,7 @@ static int apm_do_idle(void)
u8 ret = 0;
int idled = 0;
int polling;
int err;

polling = !!(current_thread_info()->status & TS_POLLING);
if (polling) {
@@ -782,7 +824,7 @@ static int apm_do_idle(void)
}
if (!need_resched()) {
idled = 1;
ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
}
if (polling)
current_thread_info()->status |= TS_POLLING;
@@ -797,8 +839,7 @@ static int apm_do_idle(void)
 * Only report the failure the first 5 times.
 */
if (++t < 5) {
printk(KERN_DEBUG "apm_do_idle failed (%d)\n",
(eax >> 8) & 0xff);
printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
t = jiffies;
}
return -1;
@@ -816,9 +857,10 @@ static int apm_do_idle(void)
static void apm_do_busy(void)
{
u32 dummy;
int err;

if (clock_slowed || ALWAYS_CALL_BUSY) {
(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy);
(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
clock_slowed = 0;
}
}
@@ -937,7 +979,7 @@ static void apm_power_off(void)

/* Some bioses don't like being called from CPU != 0 */
if (apm_info.realmode_power_off) {
(void)apm_save_cpus();
set_cpus_allowed_ptr(current, cpumask_of(0));
machine_real_restart(po_bios_call, sizeof(po_bios_call));
} else {
(void)set_system_power_state(APM_STATE_OFF);
@@ -956,12 +998,13 @@ static void apm_power_off(void)
static int apm_enable_power_management(int enable)
{
u32 eax;
int err;

if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
return APM_NOT_ENGAGED;
if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
enable, &eax))
return (eax >> 8) & 0xff;
enable, &eax, &err))
return err;
if (enable)
apm_info.bios.flags &= ~APM_BIOS_DISABLED;
else
@@ -986,24 +1029,23 @@ static int apm_enable_power_management(int enable)

static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
{
u32 eax;
u32 ebx;
u32 ecx;
u32 edx;
u32 dummy;
struct apm_bios_call call;

call.func = APM_FUNC_GET_STATUS;
call.ebx = APM_DEVICE_ALL;
call.ecx = 0;

if (apm_info.get_power_status_broken)
return APM_32_UNSUPPORTED;
if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0,
&eax, &ebx, &ecx, &edx, &dummy))
return (eax >> 8) & 0xff;
*status = ebx;
*bat = ecx;
if (apm_bios_call(&call))
return call.err;
*status = call.ebx;
*bat = call.ecx;
if (apm_info.get_power_status_swabinminutes) {
*life = swab16((u16)edx);
*life = swab16((u16)call.edx);
*life |= 0x8000;
} else
*life = edx;
*life = call.edx;
return APM_SUCCESS;
}

@@ -1048,12 +1090,14 @@ static int apm_get_battery_status(u_short which, u_short *status,
static int apm_engage_power_management(u_short device, int enable)
{
u32 eax;
int err;

if ((enable == 0) && (device == APM_DEVICE_ALL)
&& (apm_info.bios.flags & APM_BIOS_DISABLED))
return APM_DISABLED;
if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, &eax))
return (eax >> 8) & 0xff;
if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable,
&eax, &err))
return err;
if (device == APM_DEVICE_ALL) {
if (enable)
apm_info.bios.flags &= ~APM_BIOS_DISENGAGED;
@@ -1689,16 +1733,14 @@ static int apm(void *unused)
char *power_stat;
char *bat_stat;

#ifdef CONFIG_SMP
/* 2002/08/01 - WT
 * This is to avoid random crashes at boot time during initialization
 * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D.
 * Some bioses don't like being called from CPU != 0.
 * Method suggested by Ingo Molnar.
 */
set_cpus_allowed(current, cpumask_of_cpu(0));
set_cpus_allowed_ptr(current, cpumask_of(0));
BUG_ON(smp_processor_id() != 0);
#endif

if (apm_info.connection_version == 0) {
apm_info.connection_version = apm_info.bios.version;

@@ -41,8 +41,6 @@

#include "cpu.h"

#ifdef CONFIG_X86_64

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
@@ -60,16 +58,6 @@ void __init setup_cpu_local_masks(void)
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

cpumask_t cpu_sibling_setup_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_initialized;
cpumask_t cpu_callin_map;

#endif /* CONFIG_X86_32 */

static const struct cpu_dev *this_cpu __cpuinitdata;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
@@ -859,6 +847,7 @@ static void vgetcpu_set_mode(void)
void __init identify_boot_cpu(void)
{
identify_cpu(&boot_cpu_data);
init_c1e_mask();
#ifdef CONFIG_X86_32
sysenter_setup();
enable_sep_cpu();

@@ -211,7 +211,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
unsigned int i;

#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif

/* Errata workaround */

@@ -54,7 +54,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;

#ifndef CONFIG_SMP
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
static inline const struct cpumask *cpu_core_mask(int cpu)
{
return cpumask_of(0);
}
#endif

/* Return a frequency in MHz, given an input fid */
@@ -699,7 +702,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,

dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
print_basics(data);

for (j = 0; j < data->numps; j++)
@@ -862,7 +865,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)

/* fill in data */
data->numps = data->acpi_data.state_count;
if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
print_basics(data);
powernow_k8_acpi_pst_values(data, 0);

@@ -1300,7 +1303,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (cpu_family == CPU_HW_PSTATE)
cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
else
cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
data->available_cores = pol->cpus;

if (cpu_family == CPU_HW_PSTATE)
@@ -1365,7 +1368,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
unsigned int khz = 0;
unsigned int first;

first = first_cpu(per_cpu(cpu_core_map, cpu));
first = cpumask_first(cpu_core_mask(cpu));
data = per_cpu(powernow_data, first);

if (!data)

@@ -321,7 +321,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)

/* only run on CPU to be set, or on its sibling */
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif

cpus_allowed = current->cpus_allowed;

@@ -159,7 +159,7 @@ struct _cpuid4_info_regs {
unsigned long can_disable;
};

#ifdef CONFIG_PCI
#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
static struct pci_device_id k8_nb_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
@@ -324,15 +324,6 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
return 0;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
struct _cpuid4_info_regs *leaf_regs =
(struct _cpuid4_info_regs *)this_leaf;

return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static int __cpuinit find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -508,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
@@ -571,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
per_cpu(cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
struct _cpuid4_info_regs *leaf_regs =
(struct _cpuid4_info_regs *)this_leaf;

return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
int j, *retval = _retval, cpu = smp_processor_id();
@@ -612,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

@@ -990,7 +990,7 @@ static struct sysdev_attribute *mce_attributes[] = {
NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;
static cpumask_var_t mce_device_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
@@ -1021,7 +1021,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
if (err)
goto error2;
}
cpu_set(cpu, mce_device_initialized);
cpumask_set_cpu(cpu, mce_device_initialized);

return 0;
error2:
@@ -1043,7 +1043,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
{
int i;

if (!cpu_isset(cpu, mce_device_initialized))
if (!cpumask_test_cpu(cpu, mce_device_initialized))
return;

for (i = 0; mce_attributes[i]; i++)
@@ -1053,7 +1053,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
sysdev_remove_file(&per_cpu(device_mce, cpu),
&bank_attrs[i]);
sysdev_unregister(&per_cpu(device_mce,cpu));
cpu_clear(cpu, mce_device_initialized);
cpumask_clear_cpu(cpu, mce_device_initialized);
}

/* Make sure there are no machine checks on offlined CPUs. */
@@ -1162,6 +1162,8 @@ static __init int mce_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;

alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);

err = mce_init_banks();
if (err)
return err;
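Several hunks above (backtrace_mask in the NMI watchdog and mce_device_initialized in the MCE code) follow the same conversion recipe: a static cpumask_t becomes a cpumask_var_t that is allocated once at init time and afterwards only touched through cpumask_* helpers. A condensed, hypothetical version of that recipe (identifiers invented for illustration, not taken from this commit):

#include <linux/cpumask.h>
#include <linux/gfp.h>

static cpumask_var_t tracked_cpus;	/* was: static cpumask_t tracked_cpus */

static int __init tracked_cpus_init(void)
{
	/* With CONFIG_CPUMASK_OFFSTACK=y this kmallocs the bitmap;
	 * otherwise cpumask_var_t is a fixed-size array and the call
	 * always succeeds. */
	if (!alloc_cpumask_var(&tracked_cpus, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void mark_cpu_tracked(unsigned int cpu)
{
	cpumask_set_cpu(cpu, tracked_cpus);	/* was: cpu_set(cpu, map) */
}

static int cpu_is_tracked(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, tracked_cpus);	/* was: cpu_isset(cpu, map) */
}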