Merge branch 'amd-iommu/2.6.32' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into core/iommu
+365 -134

File diff suppressed because it is too large
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
-        printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
+        printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
                dev_name(&iommu->dev->dev), iommu->cap_ptr);
 
         iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
@@ -434,6 +434,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
         return cmd_buf;
 }
 
+/*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
 /*
  * This function writes the command buffer address to the hardware and
  * enables it.
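The helper added above encodes an ordering requirement: the command fetcher must be quiescent before the ring pointers are rewound. A hedged, userspace-style sketch of that sequence follows; the register layout, the enable bit, and all names are illustrative stand-ins, not the driver's real MMIO map. The next hunk then reuses the helper on the enable path instead of open-coding the pointer reset.

#include <stdint.h>

struct cmd_ring_regs {
        volatile uint32_t ctrl;         /* bit 0 stands in for CONTROL_CMDBUF_EN */
        volatile uint32_t head;         /* hardware read pointer */
        volatile uint32_t tail;         /* driver write pointer */
};

static void cmd_ring_reset(struct cmd_ring_regs *r)
{
        r->ctrl &= ~1u;         /* stop the fetcher first ... */
        r->head = 0;            /* ... so both ring pointers can be */
        r->tail = 0;            /* rewound while it is quiescent */
        r->ctrl |= 1u;          /* re-enable; fetching restarts at slot 0 */
}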
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
         memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                     &entry, sizeof(entry));
 
-        /* set head and tail to zero manually */
-        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
-        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
-        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+        amd_iommu_reset_cmd_buffer(iommu);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
         switch (*p) {
         case ACPI_IVHD_TYPE:
 
-                DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x "
+                DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                             "seg: %d flags: %01x info %04x\n",
                             PCI_BUS(h->devid), PCI_SLOT(h->devid),
                             PCI_FUNC(h->devid), h->cap_ptr,
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 
         r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
                         IRQF_SAMPLE_RANDOM,
-                        "AMD IOMMU",
+                        "AMD-Vi",
                         NULL);
 
         if (r) {
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void)
 
 
         if (no_iommu) {
-                printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
+                printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
                 return 0;
         }
 
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void)
         if (ret)
                 goto free;
 
-        ret = amd_iommu_init_dma_ops();
+        if (iommu_pass_through)
+                ret = amd_iommu_init_passthrough();
+        else
+                ret = amd_iommu_init_dma_ops();
         if (ret)
                 goto free;
 
         enable_iommus();
 
-        printk(KERN_INFO "AMD IOMMU: device isolation ");
+        if (iommu_pass_through)
+                goto out;
+
+        printk(KERN_INFO "AMD-Vi: device isolation ");
         if (amd_iommu_isolate)
                 printk("enabled\n");
         else
                 printk("disabled\n");
 
         if (amd_iommu_unmap_flush)
-                printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
+                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
         else
-                printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
+                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
 out:
         return ret;
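This hunk adds a boot-time fork in the init path: iommu=pt selects a passthrough setup and skips the isolation reporting entirely. A minimal sketch of that control flow, with stand-in function names (init_passthrough_stub and init_dma_ops_stub are not the kernel symbols):

#include <stdio.h>

static int init_passthrough_stub(void) { return 0; }  /* 1:1 mapping, no isolation */
static int init_dma_ops_stub(void)     { return 0; }  /* full DMA translation */

static int iommu_init_sketch(int pass_through)
{
        int ret;

        if (pass_through)
                ret = init_passthrough_stub();
        else
                ret = init_dma_ops_stub();
        if (ret)
                return ret;     /* both paths share one error check */

        if (pass_through)
                return 0;       /* isolation reporting is skipped entirely */

        printf("device isolation configured\n");
        return 0;
}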
@@ -3793,6 +3793,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
         mmr_pnode = uv_blade_to_pnode(mmr_blade);
         uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
 
+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
+
         return irq;
 }
 
@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
         unsigned long mask = cpumask_bits(cpumask)[0];
         unsigned long flags;
 
+        if (WARN_ONCE(!mask, "empty IPI mask"))
+                return;
+
         local_irq_save(flags);
         WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
         __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
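The guard works because WARN_ONCE() evaluates to the truth value of its condition, so a single statement both warns (once) and bails out. A self-contained sketch of that idiom, using a simplified userspace stand-in for the kernel macro (GNU statement expressions, as in kernel code):

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for the kernel's WARN_ONCE(); returns the condition */
#define WARN_ONCE(cond, msg) ({                          \
        static bool __warned;                            \
        bool __cond = (cond);                            \
        if (__cond && !__warned) {                       \
                __warned = true;                         \
                fprintf(stderr, "WARNING: %s\n", msg);   \
        }                                                \
        __cond;                                          \
})

static void send_ipi_mask(unsigned long mask, int vector)
{
        if (WARN_ONCE(!mask, "empty IPI mask"))
                return;         /* warn once, then refuse to program the APIC */
        /* ... write (mask, vector) to the interrupt controller ... */
        (void)vector;
}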
@@ -44,6 +44,11 @@ static struct apic *apic_probe[] __initdata = {
         NULL,
 };
 
+static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+        return hard_smp_processor_id() >> index_msb;
+}
+
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
@@ -69,6 +74,11 @@ void __init default_setup_apic_routing(void)
                 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
         }
 
+        if (is_vsmp_box()) {
+                /* need to update phys_pkg_id */
+                apic->phys_pkg_id = apicid_phys_pkg_id;
+        }
+
         /*
          * Now that apic routing model is selected, configure the
          * fault handling for intr remapping.
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         return x2apic_enabled();
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-        return cpumask_of(0);
+        return cpu_online_mask;
 }
 
 /*
@@ -170,7 +172,7 @@ static unsigned long set_apic_id(unsigned int id)
 
 static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
 {
-        return current_cpu_data.initial_apicid >> index_msb;
+        return initial_apicid >> index_msb;
 }
 
 static void x2apic_send_IPI_self(int vector)
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
         return 0;
 }
 
-/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
-
+/*
+ * need to use more than cpu 0, because we need more vectors when
+ * MSI-X are used.
+ */
 static const struct cpumask *x2apic_target_cpus(void)
 {
-        return cpumask_of(0);
+        return cpu_online_mask;
 }
 
 static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
@@ -162,7 +164,7 @@ static unsigned long set_apic_id(unsigned int id)
 
 static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
 {
-        return current_cpu_data.initial_apicid >> index_msb;
+        return initial_apicid >> index_msb;
 }
 
 static void x2apic_send_IPI_self(int vector)
@@ -46,7 +46,7 @@ static int early_get_nodeid(void)
         return node_id.s.node_id;
 }
 
-static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
         if (!strcmp(oem_id, "SGI")) {
                 if (!strcmp(oem_table_id, "UVL"))
@@ -253,7 +253,7 @@ static void uv_send_IPI_self(int vector)
         apic_write(APIC_SELF_IPI, vector);
 }
 
-struct apic apic_x2apic_uv_x = {
+struct apic __refdata apic_x2apic_uv_x = {
 
         .name                   = "UV large system",
         .probe                  = NULL,
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = {
         .apic_id_registered     = uv_apic_id_registered,
 
         .irq_delivery_mode      = dest_Fixed,
-        .irq_dest_mode          = 1, /* logical */
+        .irq_dest_mode          = 0, /* physical */
 
         .target_cpus            = uv_target_cpus,
         .disable_esr            = 0,
@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
         BUG();
 }
 
-static __init void map_low_mmrs(void)
-{
-        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
-        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
-}
-
 enum map_type {map_wb, map_uc};
 
 static __init void map_high(char *id, unsigned long base, int shift,
@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode)
                 map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
 }
 
-static __init void map_config_high(int max_pnode)
-{
-        union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
-        int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-        cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
-        if (cfg.s.enable)
-                map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
-}
-
-static __init void map_mmr_high(int max_pnode)
-{
-        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
-        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
-        if (mmr.s.enable)
-                map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
-}
-
 static __init void map_mmioh_high(int max_pnode)
 {
         union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -566,8 +540,6 @@ void __init uv_system_init(void)
         unsigned long mmr_base, present, paddr;
         unsigned short pnode_mask;
 
-        map_low_mmrs();
-
         m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
         m_val = m_n_config.s.m_skt;
         n_val = m_n_config.s.n_skt;
@@ -591,6 +563,8 @@ void __init uv_system_init(void)
         bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
         uv_blade_info = kmalloc(bytes, GFP_KERNEL);
         BUG_ON(!uv_blade_info);
+        for (blade = 0; blade < uv_num_possible_blades(); blade++)
+                uv_blade_info[blade].memory_nid = -1;
 
         get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
@@ -629,6 +603,9 @@ void __init uv_system_init(void)
                 lcpu = uv_blade_info[blade].nr_possible_cpus;
                 uv_blade_info[blade].nr_possible_cpus++;
 
+                /* Any node on the blade, else will contain -1. */
+                uv_blade_info[blade].memory_nid = nid;
+
                 uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                 uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                 uv_cpu_hub_info(cpu)->m_val = m_val;
@@ -662,11 +639,10 @@ void __init uv_system_init(void)
                 pnode = (paddr >> m_val) & pnode_mask;
                 blade = boot_pnode_to_blade(pnode);
                 uv_node_to_blade[nid] = blade;
+                max_pnode = max(pnode, max_pnode);
         }
 
         map_gru_high(max_pnode);
-        map_mmr_high(max_pnode);
-        map_config_high(max_pnode);
         map_mmioh_high(max_pnode);
 
         uv_cpu_init();
@@ -811,7 +811,7 @@ static int apm_do_idle(void)
         u8 ret = 0;
         int idled = 0;
         int polling;
-        int err;
+        int err = 0;
 
         polling = !!(current_thread_info()->status & TS_POLLING);
         if (polling) {
@@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_common.o = -pg
 endif
 
+# Make sure load_percpu_segment has no stackprotector
+nostackp := $(call cc-option, -fno-stack-protector)
+CFLAGS_common.o := $(nostackp)
+
 obj-y := intel_cacheinfo.o addon_cpuid_features.o
 obj-y += proc.o capflags.o powerflags.o common.o
 obj-y += vmware.o hypervisor.o
@@ -400,6 +400,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                 level = cpuid_eax(1);
                 if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                         set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+                /*
+                 * Some BIOSes incorrectly force this feature, but only K8
+                 * revision D (model = 0x14) and later actually support it.
+                 */
+                if (c->x86_model < 0x14)
+                        clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
         }
         if (c->x86 == 0x10 || c->x86 == 0x11)
                 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
@@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void)
         alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+        display_cacheinfo(c);
+#else
+        /* Not much we can do here... */
+        /* Check if at least it has cpuid */
+        if (c->cpuid_level == -1) {
+                /* No cpuid. It must be an ancient CPU */
+                if (c->x86 == 4)
+                        strcpy(c->x86_model_id, "486");
+                else if (c->x86 == 3)
+                        strcpy(c->x86_model_id, "386");
+        }
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+        .c_init         = default_init,
+        .c_vendor       = "Unknown",
+        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu)
 
 static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
-        display_cacheinfo(c);
-#else
-        /* Not much we can do here... */
-        /* Check if at least it has cpuid */
-        if (c->cpuid_level == -1) {
-                /* No cpuid. It must be an ancient CPU */
-                if (c->x86 == 4)
-                        strcpy(c->x86_model_id, "486");
-                else if (c->x86 == 3)
-                        strcpy(c->x86_model_id, "386");
-        }
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
-        .c_init         = default_init,
-        .c_vendor       = "Unknown",
-        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
-};
-
 static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
         unsigned int *v;
@@ -1226,8 +1226,13 @@ static void mce_init(void)
 }
 
 /* Add per CPU specific workarounds here */
-static void mce_cpu_quirks(struct cpuinfo_x86 *c)
+static int mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
+        if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
+                pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
+                return -EOPNOTSUPP;
+        }
+
         /* This should be disabled by the BIOS, but isn't always */
         if (c->x86_vendor == X86_VENDOR_AMD) {
                 if (c->x86 == 15 && banks > 4) {
@@ -1273,11 +1278,20 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
                 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
                         monarch_timeout < 0)
                         monarch_timeout = USEC_PER_SEC;
+
+                /*
+                 * There are also broken BIOSes on some Pentium M and
+                 * earlier systems:
+                 */
+                if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
+                        mce_bootlog = 0;
         }
         if (monarch_timeout < 0)
                 monarch_timeout = 0;
         if (mce_bootlog != 0)
                 mce_panic_timeout = 30;
+
+        return 0;
 }
 
 static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
@@ -1338,11 +1352,10 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
         if (!mce_available(c))
                 return;
 
-        if (mce_cap_init() < 0) {
+        if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) {
                 mce_disabled = 1;
                 return;
         }
-        mce_cpu_quirks(c);
 
         machine_check_vector = do_machine_check;
 
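mce_cpu_quirks() now reports failure instead of returning void, and the caller folds it into the existing capability check with a short-circuiting OR. A small sketch of the resulting error-aggregation shape; the stub names and the vendor_known flag are stand-ins, not the MCE internals:

#include <errno.h>

static int cap_init_stub(void)
{
        return 0;       /* pretend capability probing succeeded */
}

static int cpu_quirks_stub(int vendor_known)
{
        /* an unknown vendor now fails instead of silently continuing */
        return vendor_known ? 0 : -EOPNOTSUPP;
}

static void mcheck_init_sketch(int vendor_known, int *mce_disabled)
{
        /* either failure disables MCE; || skips the quirks probe on cap failure */
        if (cap_init_stub() < 0 || cpu_quirks_stub(vendor_known) < 0) {
                *mce_disabled = 1;
                return;
        }
        /* ... install the machine-check vector ... */
}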
@@ -36,6 +36,7 @@
 
 static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
 
@@ -96,27 +97,33 @@ static int therm_throt_process(int curr)
 {
         unsigned int cpu = smp_processor_id();
         __u64 tmp_jiffs = get_jiffies_64();
+        bool was_throttled = __get_cpu_var(thermal_throttle_active);
+        bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
 
-        if (curr)
+        if (is_throttled)
                 __get_cpu_var(thermal_throttle_count)++;
 
-        if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+        if (!(was_throttled ^ is_throttled) &&
+            time_before64(tmp_jiffs, __get_cpu_var(next_check)))
                 return 0;
 
         __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
 
         /* if we just entered the thermal event */
-        if (curr) {
+        if (is_throttled) {
                 printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-                       "cpu clock throttled (total events = %lu)\n", cpu,
-                       __get_cpu_var(thermal_throttle_count));
+                       "cpu clock throttled (total events = %lu)\n",
+                       cpu, __get_cpu_var(thermal_throttle_count));
 
                 add_taint(TAINT_MACHINE_CHECK);
-        } else {
-                printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+                return 1;
+        }
+        if (was_throttled) {
+                printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+                return 1;
         }
 
-        return 1;
+        return 0;
 }
 
 #ifdef CONFIG_SYSFS
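The rewrite turns the throttle message into an edge-triggered event: a message is printed when the per-CPU state changes, and the jiffies-based rate limit only suppresses repeats while the state is steady (the was ^ is XOR detects the transition). A compact sketch of that pattern outside the kernel, with stand-in names:

#include <stdbool.h>

struct throttle_state {
        bool active;                    /* mirrors thermal_throttle_active */
        unsigned long events;           /* mirrors thermal_throttle_count */
        unsigned long long next_check;  /* rate-limit deadline, jiffies-like */
};

/* returns true when a state message should be printed */
static bool throttle_event(struct throttle_state *s, bool curr,
                           unsigned long long now, unsigned long long interval)
{
        bool was = s->active;

        s->active = curr;
        if (curr)
                s->events++;

        /* no transition and still inside the rate-limit window: stay quiet */
        if (was == curr && now < s->next_check)
                return false;

        s->next_check = now + interval;
        return was || curr;     /* just entered or just left throttling */
}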
@@ -55,6 +55,7 @@ struct x86_pmu {
         int             num_counters_fixed;
         int             counter_bits;
         u64             counter_mask;
+        int             apic;
         u64             max_period;
         u64             intel_ctrl;
 };
@@ -72,8 +73,8 @@ static const u64 p6_perfmon_event_map[] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0000,
-  [PERF_COUNT_HW_CACHE_MISSES]          = 0x0000,
+  [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
@@ -613,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
         int i;
 
         if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -627,9 +629,11 @@ static bool reserve_pmc_hardware(void)
                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                         goto eventsel_fail;
         }
+#endif
 
         return true;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
         for (i--; i >= 0; i--)
                 release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -644,10 +648,12 @@ perfctr_fail:
         enable_lapic_nmi_watchdog();
 
         return false;
+#endif
 }
 
 static void release_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
         int i;
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
@@ -657,6 +663,7 @@ static void release_pmc_hardware(void)
 
         if (nmi_watchdog == NMI_LOCAL_APIC)
                 enable_lapic_nmi_watchdog();
+#endif
 }
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
@@ -748,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
                         hwc->sample_period = x86_pmu.max_period;
                         hwc->last_period = hwc->sample_period;
                         atomic64_set(&hwc->period_left, hwc->sample_period);
+        } else {
+                /*
+                 * If we have a PMU initialized but no APIC
+                 * interrupts, we cannot sample hardware
+                 * counters (user-space has to fall back and
+                 * sample via a hrtimer based software counter):
+                 */
+                if (!x86_pmu.apic)
+                        return -EOPNOTSUPP;
         }
 
         counter->destroy = hw_perf_counter_destroy;
@@ -1449,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 
 void set_perf_counter_pending(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
 }
 
 void perf_counters_lapic_init(void)
 {
-        if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+        if (!x86_pmu.apic || !x86_pmu_initialized())
                 return;
 
         /*
          * Always use NMI for PMU
          */
         apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 }
 
 static int __kprobes
@@ -1484,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 
         regs = args->regs;
 
+#ifdef CONFIG_X86_LOCAL_APIC
         apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
         /*
          * Can't rely on the handled return value to say it was our NMI, two
          * counters could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1515,6 +1537,7 @@ static struct x86_pmu p6_pmu = {
         .event_map              = p6_pmu_event_map,
         .raw_event              = p6_pmu_raw_event,
         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
+        .apic                   = 1,
         .max_period             = (1ULL << 31) - 1,
         .version                = 0,
         .num_counters           = 2,
@@ -1541,6 +1564,7 @@ static struct x86_pmu intel_pmu = {
         .event_map              = intel_pmu_event_map,
         .raw_event              = intel_pmu_raw_event,
         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
+        .apic                   = 1,
         /*
          * Intel PMCs cannot be accessed sanely above 32 bit width,
          * so we install an artificial 1<<31 period regardless of
@@ -1564,6 +1588,7 @@ static struct x86_pmu amd_pmu = {
         .num_counters           = 4,
         .counter_bits           = 48,
         .counter_mask           = (1ULL << 48) - 1,
+        .apic                   = 1,
         /* use highest bit to detect overflow */
         .max_period             = (1ULL << 47) - 1,
 };
@@ -1589,12 +1614,13 @@ static int p6_pmu_init(void)
                 return -ENODEV;
         }
 
-        if (!cpu_has_apic) {
-                pr_info("no Local APIC, try rebooting with lapic");
-                return -ENODEV;
-        }
-
-        x86_pmu = p6_pmu;
+        x86_pmu = p6_pmu;
+
+        if (!cpu_has_apic) {
+                pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+                pr_info("no hardware sampling interrupt available.\n");
+                x86_pmu.apic = 0;
+        }
 
         return 0;
 }
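Across these perf hunks the pattern is consistent: instead of failing initialization when no local APIC is present, the PMU records x86_pmu.apic = 0 and only the sampling path later refuses with -EOPNOTSUPP, so pure counting keeps working. A hedged sketch of that capability-flag fallback; the struct and function names are stand-ins, not the perf_counter internals:

#include <errno.h>

struct pmu_caps {
        int apic;       /* 1 when APIC-driven sampling interrupts work */
};

static struct pmu_caps pmu = { .apic = 1 };

static int pmu_init(int cpu_has_apic)
{
        if (!cpu_has_apic)
                pmu.apic = 0;   /* counting still works; sampling will not */
        return 0;               /* no longer a hard -ENODEV */
}

static int counter_init(int wants_sampling)
{
        if (wants_sampling && !pmu.apic)
                return -EOPNOTSUPP;     /* caller falls back to hrtimer sampling */
        return 0;
}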
@@ -354,7 +354,7 @@ void __init efi_init(void)
          */
         c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
         if (c16) {
-                for (i = 0; i < sizeof(vendor) && *c16; ++i)
+                for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
                         vendor[i] = *c16++;
                 vendor[i] = '\0';
         } else
@@ -512,7 +512,7 @@ void __init efi_enter_virtual_mode(void)
                     && end_pfn <= max_pfn_mapped))
                         va = __va(md->phys_addr);
                 else
-                        va = efi_ioremap(md->phys_addr, size);
+                        va = efi_ioremap(md->phys_addr, size, md->type);
 
                 md->virt_addr = (u64) (unsigned long) va;
 
@@ -98,10 +98,14 @@ void __init efi_call_phys_epilog(void)
         early_runtime_code_mapping_set_exec(0);
 }
 
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+                                 u32 type)
 {
         unsigned long last_map_pfn;
 
+        if (type == EFI_MEMORY_MAPPED_IO)
+                return ioremap(phys_addr, size);
+
         last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
         if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
                 return NULL;
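efi_ioremap() now dispatches on the EFI memory type: EFI_MEMORY_MAPPED_IO regions need an uncached MMIO mapping rather than being pulled into the kernel's direct mapping. A minimal sketch of that dispatch; the stub mappers stand in for ioremap()/init_memory_mapping() and just illustrate the branch:

#include <stdint.h>
#include <stddef.h>

#define EFI_MEMORY_MAPPED_IO 11         /* EfiMemoryMappedIO in the EFI spec */

static void *ioremap_stub(uint64_t phys, uint64_t size)    /* uncached MMIO */
{
        (void)phys; (void)size;
        return NULL;    /* placeholder */
}

static void *direct_map_stub(uint64_t phys, uint64_t size) /* cacheable RAM */
{
        (void)phys; (void)size;
        return NULL;    /* placeholder */
}

static void *efi_map(uint64_t phys, uint64_t size, uint32_t type)
{
        if (type == EFI_MEMORY_MAPPED_IO)
                return ioremap_stub(phys, size);   /* device memory */
        return direct_map_stub(phys, size);        /* normal memory */
}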
@@ -261,9 +261,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
  * which will be freed later
  */
 
-#ifndef CONFIG_HOTPLUG_CPU
-.section .init.text,"ax",@progbits
-#endif
+__CPUINIT
 
 #ifdef CONFIG_SMP
 ENTRY(startup_32_smp)
@@ -602,7 +600,7 @@ ignore_int:
 #endif
         iret
 
-.section .cpuinit.data,"wa"
+__REFDATA
 .align 4
 ENTRY(initial_code)
         .long i386_start_kernel
@@ -32,7 +32,14 @@ int no_iommu __read_mostly;
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly = 0;
 
-int iommu_pass_through;
+/*
+ * This variable becomes 1 if iommu=pt is passed on the kernel command line.
+ * If this variable is 1, IOMMU implementations do no DMA translation for
+ * devices and allow every device to access to whole physical memory. This is
+ * useful if a user wants to use an IOMMU only for KVM device assignment to
+ * guests and not for driver dma translation.
+ */
+int iommu_pass_through __read_mostly;
 
 dma_addr_t bad_dma_address __read_mostly = 0;
 EXPORT_SYMBOL(bad_dma_address);
@@ -519,16 +519,12 @@ static void c1e_idle(void)
                 if (!cpumask_test_cpu(cpu, c1e_mask)) {
                         cpumask_set_cpu(cpu, c1e_mask);
                         /*
-                         * Force broadcast so ACPI can not interfere. Needs
-                         * to run with interrupts enabled as it uses
-                         * smp_function_call.
+                         * Force broadcast so ACPI can not interfere.
                          */
-                        local_irq_enable();
                         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                            &cpu);
                         printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                                cpu);
-                        local_irq_disable();
                 }
                 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
Some files were not shown because too many files have changed in this diff.