Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (22 commits)
x86: Fix code patching for paravirt-alternatives on 486
x86, msr: change msr-reg.o to obj-y, and export its symbols
x86: Use hard_smp_processor_id() to get apic id for AMD K8 cpus
x86, sched: Workaround broken sched domain creation for AMD Magny-Cours
x86, mcheck: Use correct cpumask for shared bank4
x86, cacheinfo: Fixup L3 cache information for AMD multi-node processors
x86: Fix CPU llc_shared_map information for AMD Magny-Cours
x86, msr: Fix msr-reg.S compilation with gas 2.16.1, on 32-bit too
x86: Move kernel_fpu_using to irq_fpu_usable in asm/i387.h
x86, msr: fix msr-reg.S compilation with gas 2.16.1
x86, msr: Export the register-setting MSR functions via /dev/*/msr
x86, msr: Create _on_cpu helpers for {rw,wr}msr_safe_regs()
x86, msr: Have the _safe MSR functions return -EIO, not -EFAULT
x86, msr: CFI annotations, cleanups for msr-reg.S
x86, asm: Make _ASM_EXTABLE() usable from assembly code
x86, asm: Add 32-bit versions of the combined CFI macros
x86, AMD: Disable wrongly set X86_FEATURE_LAHF_LM CPUID bit
x86, msr: Rewrite AMD rd/wrmsr variants
x86, msr: Add rd/wrmsr interfaces with preset registers
x86: add specific support for Intel Atom architecture
...
@@ -121,6 +121,7 @@ Code Seq# Include File Comments
'c'  00-7F  linux/comstats.h              conflict!
'c'  00-7F  linux/coda.h                  conflict!
'c'  80-9F  arch/s390/include/asm/chsc.h
'c'  A0-AF  arch/x86/include/asm/msr.h
'd'  00-FF  linux/char/drm/drm.h          conflict!
'd'  F0-FF  linux/digi1.h
'e'  all    linux/digi1.h                 conflict!

@@ -262,6 +262,15 @@ config MCORE2
          family in /proc/cpuinfo. Newer ones have 6 and older ones 15
          (not a typo)

config MATOM
        bool "Intel Atom"
        ---help---

          Select this for the Intel Atom platform. Intel Atom CPUs have an
          in-order pipelining architecture and thus can benefit from
          accordingly optimized code. Use a recent GCC with specific Atom
          support in order to fully benefit from selecting this option.

config GENERIC_CPU
        bool "Generic-x86-64"
        depends on X86_64

@@ -295,7 +304,7 @@ config X86_CPU
config X86_L1_CACHE_BYTES
        int
        default "128" if MPSC
        default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
        default "64" if GENERIC_CPU || MK8 || MCORE2 || MATOM || X86_32

config X86_INTERNODE_CACHE_BYTES
        int

@@ -310,7 +319,7 @@ config X86_L1_CACHE_SHIFT
        default "7" if MPENTIUM4 || MPSC
        default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
        default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
        default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
        default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU

config X86_XADD
        def_bool y

@@ -359,7 +368,7 @@ config X86_INTEL_USERCOPY

config X86_USE_PPRO_CHECKSUM
        def_bool y
        depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
        depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM

config X86_USE_3DNOW
        def_bool y

@@ -387,7 +396,7 @@ config X86_P6_NOP

config X86_TSC
        def_bool y
        depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
        depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64

config X86_CMPXCHG64
        def_bool y

@@ -397,7 +406,7 @@ config X86_CMPXCHG64
# generates cmov.
config X86_CMOV
        def_bool y
        depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
        depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)

config X86_MINIMUM_CPU_FAMILY
        int

@@ -55,6 +55,8 @@ else

        cflags-$(CONFIG_MCORE2) += \
                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
                $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
        cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
        KBUILD_CFLAGS += $(cflags-y)

@@ -33,6 +33,8 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-f
cflags-$(CONFIG_MVIAC3_2)       += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7)         += -march=i686
cflags-$(CONFIG_MCORE2)         += -march=i686 $(call tune,core2)
cflags-$(CONFIG_MATOM)          += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
        $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))

# AMD Elan support
cflags-$(CONFIG_X86_ELAN)       += -march=i486

@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

static inline int kernel_fpu_using(void)
{
        if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
                return 1;
        return 0;
}

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;

@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                return -EINVAL;
        }

        if (kernel_fpu_using())
        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();

@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (kernel_fpu_using())
        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();

@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (kernel_fpu_using())
        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();

@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (kernel_fpu_using()) {
        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));

@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (kernel_fpu_using()) {
        if (!irq_fpu_usable()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));

@@ -3,7 +3,7 @@

#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_EX_SEC .section __ex_table
# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"

@@ -38,10 +38,18 @@
#define _ASM_DI __ASM_REG(di)

/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
        __ASM_EX_SEC ; \
        _ASM_ALIGN ; \
        _ASM_PTR from , to ; \
        .previous
#else
# define _ASM_EXTABLE(from,to) \
        __ASM_EX_SEC \
        _ASM_ALIGN "\n" \
        _ASM_PTR #from "," #to "\n" \
        " .previous\n"
#endif

#endif /* _ASM_X86_ASM_H */

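As background for the _ASM_EXTABLE() change above: the macro emits an __ex_table entry pairing a possibly-faulting instruction with its fixup code. A minimal sketch of the C/inline-asm form, modeled on the rdmsr fixup in the msr.h hunks further down; the function name and the byte load are illustrative only (assumes <asm/asm.h> and <asm/errno.h>):

/* Illustrative only: load one byte, returning -EFAULT instead of oopsing. */
static inline int read_byte_safe(const unsigned char *addr, unsigned char *val)
{
        int err = 0;

        asm volatile("1: movb %2, %b1\n"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: mov %3, %0 ; jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)           /* fault at 1: resumes at 3: */
                     : "+r" (err), "=q" (*val)
                     : "m" (*addr), "i" (-EFAULT));
        return err;
}
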
@@ -95,6 +95,7 @@
#define X86_FEATURE_NONSTOP_TSC         (3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_CLFLUSH_MONITOR     (3*32+25) /* "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID         (3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM             (3*32+27) /* multi-node processor */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3                (4*32+ 0) /* "pni" SSE-3 */

@@ -87,9 +87,25 @@
        CFI_RESTORE \reg
        .endm
#else /*!CONFIG_X86_64*/
        .macro pushl_cfi reg
        pushl \reg
        CFI_ADJUST_CFA_OFFSET 4
        .endm

/* 32bit defenitions are missed yet */
        .macro popl_cfi reg
        popl \reg
        CFI_ADJUST_CFA_OFFSET -4
        .endm

        .macro movl_cfi reg offset=0
        movl %\reg, \offset(%esp)
        CFI_REL_OFFSET \reg, \offset
        .endm

        .macro movl_cfi_restore offset reg
        movl \offset(%esp), %\reg
        CFI_RESTORE \reg
        .endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/

@@ -301,6 +301,14 @@ static inline void kernel_fpu_end(void)
        preempt_enable();
}

static inline bool irq_fpu_usable(void)
{
        struct pt_regs *regs;

        return !in_interrupt() || !(regs = get_irq_regs()) || \
                user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions

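A minimal sketch of the calling pattern this helper enables (the pattern the aesni glue changes earlier in this diff switch to); transform_sse() and transform_generic() are placeholder names, not functions from this merge:

/* Illustrative only: touch FPU/SSE state only when it is safe in this context. */
static void do_transform(u8 *dst, const u8 *src, unsigned int len)
{
        if (!irq_fpu_usable()) {
                transform_generic(dst, src, len);       /* plain C fallback */
        } else {
                kernel_fpu_begin();
                transform_sse(dst, src, len);           /* SIMD fast path */
                kernel_fpu_end();
        }
}
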
@@ -17,6 +17,8 @@
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
#define MODULE_PROC_FAMILY "CORE2 "
#elif defined CONFIG_MATOM
#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
#elif defined CONFIG_MPENTIUMII

@@ -3,10 +3,16 @@

#include <asm/msr-index.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/ioctl.h>

#define X86_IOC_RDMSR_REGS      _IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS      _IOWR('c', 0xA1, __u32[8])

#ifdef __KERNEL__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>

@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), [fault] "i" (-EFAULT));
        return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
                                                           int *err)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("2: rdmsr ; xor %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3:  mov %3,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
                     : "c" (msr), [fault] "i" (-EIO));
        return EAX_EDX_VAL(val, low, high);
}

@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=a" (err)
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EFAULT)
                       [fault] "i" (-EIO)
                     : "memory");
        return err;
}

extern unsigned long long native_read_tsc(void);

extern int native_rdmsr_safe_regs(u32 regs[8]);
extern int native_wrmsr_safe_regs(u32 regs[8]);

static __always_inline unsigned long long __native_read_tsc(void)
{
        DECLARE_ARGS(val, low, high);

@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
        *p = native_read_msr_safe(msr, &err);
        return err;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        *p = native_read_msr_amd_safe(msr, &err);
        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = native_rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return native_wrmsr_safe_regs(gprs);
}

static inline int rdmsr_safe_regs(u32 regs[8])
{
        return native_rdmsr_safe_regs(regs);
}

static inline int wrmsr_safe_regs(u32 regs[8])
{
        return native_wrmsr_safe_regs(regs);
}

#define rdtscl(low) \
        ((low) = (u32)__native_read_tsc())

@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{

@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */

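A sketch of how the new preset-register interface might be called, modeled on rdmsrl_amd_safe() above. The array layout assumed here (index 0 = %eax, 1 = %ecx, 2 = %edx, 7 = %edi) is inferred from that helper and from the old "D" constraint it replaces, so treat it as an assumption; the function name and its use of an AMD northbridge MSR are illustrative:

/* Illustrative only: read an AMD northbridge MSR on a given CPU. */
static int example_read_amd_msr(unsigned int cpu, u32 msr, u64 *val)
{
        u32 regs[8] = { 0 };
        int err;

        regs[1] = msr;                  /* %ecx: MSR number */
        regs[7] = 0x9c5a203a;           /* %edi: AMD passcode, as in rdmsrl_amd_safe() */

        err = rdmsr_safe_regs_on_cpu(cpu, regs);
        if (!err)
                *val = regs[0] | ((u64)regs[2] << 32);  /* %edx:%eax */
        return err;
}
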
File diff suppressed because it is too large.
arch/x86/include/asm/paravirt_types.h (new file, 721 lines): file diff suppressed because it is too large.

@@ -713,13 +713,23 @@ static inline void cpu_relax(void)
        rep_nop();
}

/* Stop speculative execution: */
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
        int tmp;

        asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
#if defined(CONFIG_M386) || defined(CONFIG_M486)
        if (boot_cpu_data.x86 < 5)
                /* There is no speculative execution.
                 * jmp is a barrier to prefetching. */
                asm volatile("jmp 1f\n1:\n" ::: "memory");
        else
#endif
                /* cpuid is a barrier to speculative execution.
                 * Prefetched instructions are automatically
                 * invalidated when modified. */
                asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                             : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,

@@ -498,8 +498,8 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
        unsigned long flags;
        local_irq_save(flags);
        memcpy(addr, opcode, len);
        local_irq_restore(flags);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;

@@ -252,6 +252,64 @@ static int __cpuinit nearby_node(int apicid)
}
#endif

/*
 * Fixup core topology information for AMD multi-node processors.
 * Assumption 1: Number of cores in each internal node is the same.
 * Assumption 2: Mixed systems with both single-node and dual-node
 *               processors are not supported.
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_PCI
        u32 t, cpn;
        u8 n, n_id;
        int cpu = smp_processor_id();

        /* fixup topology information only once for a core */
        if (cpu_has(c, X86_FEATURE_AMD_DCM))
                return;

        /* check for multi-node processor on boot cpu */
        t = read_pci_config(0, 24, 3, 0xe8);
        if (!(t & (1 << 29)))
                return;

        set_cpu_cap(c, X86_FEATURE_AMD_DCM);

        /* cores per node: each internal node has half the number of cores */
        cpn = c->x86_max_cores >> 1;

        /* even-numbered NB_id of this dual-node processor */
        n = c->phys_proc_id << 1;

        /*
         * determine internal node id and assign cores fifty-fifty to
         * each node of the dual-node processor
         */
        t = read_pci_config(0, 24 + n, 3, 0xe8);
        n = (t>>30) & 0x3;
        if (n == 0) {
                if (c->cpu_core_id < cpn)
                        n_id = 0;
                else
                        n_id = 1;
        } else {
                if (c->cpu_core_id < cpn)
                        n_id = 1;
                else
                        n_id = 0;
        }

        /* compute entire NodeID, use llc_shared_map to store sibling info */
        per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;

        /* fixup core id to be in range from 0 to cpn */
        c->cpu_core_id = c->cpu_core_id % cpn;
#endif
}
#endif

/*
 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
 * Assumes number of cores is a power of two.

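A worked example for orientation (not part of the diff): on a twelve-core, two-node Magny-Cours package, x86_max_cores is 12, so cpn = 6. Cores with cpu_core_id 0-5 are assigned to one internal node and cores 6-11 to the other, with bits 31:30 of the northbridge's 0xe8 config register (read above) deciding which half is node 0. Each core then gets cpu_llc_id = 2 * phys_proc_id + n_id, and its cpu_core_id is folded back into the 0-5 range.
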
c->phys_proc_id = c->initial_apicid >> bits;
|
||||
/* use socket ID also for last level cache */
|
||||
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
|
||||
/* fixup topology information on multi-node processors */
|
||||
if ((c->x86 == 0x10) && (c->x86_model == 9))
|
||||
amd_fixup_dcm(c);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -277,9 +338,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
int cpu = smp_processor_id();
|
||||
int node;
|
||||
unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
|
||||
unsigned apicid = c->apicid;
|
||||
|
||||
node = per_cpu(cpu_llc_id, cpu);
|
||||
|
||||
node = c->phys_proc_id;
|
||||
if (apicid_to_node[apicid] != NUMA_NO_NODE)
|
||||
node = apicid_to_node[apicid];
|
||||
if (!node_online(node)) {
|
||||
@@ -406,12 +468,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
|
||||
/*
|
||||
* Some BIOSes incorrectly force this feature, but only K8
|
||||
* revision D (model = 0x14) and later actually support it.
|
||||
* (AMD Erratum #110, docId: 25759).
|
||||
*/
|
||||
if (c->x86_model < 0x14)
|
||||
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
|
||||
u64 val;
|
||||
|
||||
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
|
||||
if (!rdmsrl_amd_safe(0xc001100d, &val)) {
|
||||
val &= ~(1ULL << 32);
|
||||
wrmsrl_amd_safe(0xc001100d, val);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if (c->x86 == 0x10 || c->x86 == 0x11)
|
||||
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
|
||||
|
||||
/* get apicid instead of initial apic id from cpuid */
|
||||
c->apicid = hard_smp_processor_id();
|
||||
#else
|
||||
|
||||
/*
|
||||
|
||||
@@ -241,7 +241,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
        case 0:
                if (!l1->val)
                        return;
                assoc = l1->assoc;
                assoc = assocs[l1->assoc];
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;

@@ -249,7 +249,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
        case 2:
                if (!l2.val)
                        return;
                assoc = l2.assoc;
                assoc = assocs[l2.assoc];
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */

@@ -258,10 +258,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
        case 3:
                if (!l3.val)
                        return;
                assoc = l3.assoc;
                assoc = assocs[l3.assoc];
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
                        size_in_kb = size_in_kb >> 1;
                        assoc = assoc >> 1;
                }
                break;
        default:
                return;

@@ -270,18 +274,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        if (leaf == 3)
                eax->split.num_threads_sharing =
                        current_cpu_data.x86_max_cores - 1;
        else
                eax->split.num_threads_sharing = 0;
        eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;


        if (assoc == 0xf)
        if (assoc == 0xffff)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assocs[assoc] - 1;
        ebx->split.ways_of_associativity = assoc - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                                        (ebx->split.ways_of_associativity + 1) - 1;

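In effect (a worked example, not part of the diff): on a multi-node package where CPUID leaf 0x80000006 describes the combined L3 of both internal nodes, setting X86_FEATURE_AMD_DCM halves both the reported size_in_kb and the associativity, so each internal node advertises only its own share of the cache.
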
@@ -523,6 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
                struct cpuinfo_x86 *d;
                for_each_online_cpu(i) {
                        if (!per_cpu(cpuid4_info, i))
                                continue;
                        d = &cpu_data(i);
                        this_leaf = CPUID4_INFO_IDX(i, index);
                        cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
                                     d->llc_shared_map);
                }
                return;
        }
        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

@@ -489,12 +489,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        int i, err = 0;
        struct threshold_bank *b = NULL;
        char name[32];
        struct cpuinfo_x86 *c = &cpu_data(cpu);


        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = cpumask_first(cpu_core_mask(cpu));
                i = cpumask_first(c->llc_shared_map);

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)

@@ -514,7 +516,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        if (err)
                goto out;

        cpumask_copy(b->cpus, cpu_core_mask(cpu));
        cpumask_copy(b->cpus, c->llc_shared_map);
        per_cpu(threshold_banks, cpu)[bank] = b;

        goto out;

@@ -539,7 +541,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
#else
        cpumask_copy(b->cpus, cpu_core_mask(cpu));
        cpumask_copy(b->cpus, c->llc_shared_map);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
#endif
        seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
#ifdef CONFIG_X86_64
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);
#endif

        seq_printf(m, "power management:");
        for (i = 0; i < 32; i++) {

@@ -1,6 +1,7 @@
/* ----------------------------------------------------------------------- *
 *
 * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved
 * Copyright 2009 Intel Corporation; author: H. Peter Anvin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by

@@ -80,11 +81,8 @@ static ssize_t msr_read(struct file *file, char __user *buf,

        for (; count; count -= 8) {
                err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
                if (err) {
                        if (err == -EFAULT) /* Fix idiotic error code */
                                err = -EIO;
                if (err)
                        break;
                }
                if (copy_to_user(tmp, &data, 8)) {
                        err = -EFAULT;
                        break;

@@ -115,11 +113,8 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
                        break;
                }
                err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
                if (err) {
                        if (err == -EFAULT) /* Fix idiotic error code */
                                err = -EIO;
                if (err)
                        break;
                }
                tmp += 2;
                bytes += 8;
        }

@@ -127,6 +122,54 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
        return bytes ? bytes : err;
}

static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
{
        u32 __user *uregs = (u32 __user *)arg;
        u32 regs[8];
        int cpu = iminor(file->f_path.dentry->d_inode);
        int err;

        switch (ioc) {
        case X86_IOC_RDMSR_REGS:
                if (!(file->f_mode & FMODE_READ)) {
                        err = -EBADF;
                        break;
                }
                if (copy_from_user(&regs, uregs, sizeof regs)) {
                        err = -EFAULT;
                        break;
                }
                err = rdmsr_safe_regs_on_cpu(cpu, regs);
                if (err)
                        break;
                if (copy_to_user(uregs, &regs, sizeof regs))
                        err = -EFAULT;
                break;

        case X86_IOC_WRMSR_REGS:
                if (!(file->f_mode & FMODE_WRITE)) {
                        err = -EBADF;
                        break;
                }
                if (copy_from_user(&regs, uregs, sizeof regs)) {
                        err = -EFAULT;
                        break;
                }
                err = wrmsr_safe_regs_on_cpu(cpu, regs);
                if (err)
                        break;
                if (copy_to_user(uregs, &regs, sizeof regs))
                        err = -EFAULT;
                break;

        default:
                err = -ENOTTY;
                break;
        }

        return err;
}

static int msr_open(struct inode *inode, struct file *file)
{
        unsigned int cpu = iminor(file->f_path.dentry->d_inode);

@@ -157,6 +200,8 @@ static const struct file_operations msr_fops = {
        .read = msr_read,
        .write = msr_write,
        .open = msr_open,
        .unlocked_ioctl = msr_ioctl,
        .compat_ioctl = msr_ioctl,
};

static int __cpuinit msr_device_create(int cpu)

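For reference, a user-space sketch of the new ioctl interface; the register-array layout (index 1 = %ecx carrying the MSR number, indices 0 and 2 = %eax/%edx holding the value) is inferred from the kernel helpers earlier in this merge, and the header path and MSR number are illustrative assumptions:

/* Illustrative only: read IA32_TSC (MSR 0x10) on CPU 0 via X86_IOC_RDMSR_REGS. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/msr.h>            /* X86_IOC_RDMSR_REGS, assuming exported headers */

int main(void)
{
        uint32_t regs[8] = { 0 };
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;
        regs[1] = 0x10;         /* %ecx: MSR number */
        if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) == 0)
                printf("MSR 0x10 = %#llx\n",
                       (unsigned long long)regs[0] |
                       ((unsigned long long)regs[2] << 32));
        close(fd);
        return 0;
}
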
Some files were not shown because too many files have changed in this diff.