Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"Bigger changes:
- sched/idle restructuring: work-in-progress preparation for deeper
integration between the scheduler and idle state selection, by
Nicolas Pitre.
- add NUMA scheduling pseudo-interleaving, by Rik van Riel.
- optimize cgroup context switches, by Peter Zijlstra.
- RT scheduling enhancements, by Thomas Gleixner.
The rest is smaller changes, non-urgent fixes and cleanups"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (68 commits)
sched: Clean up the task_hot() function
sched: Remove double calculation in fix_small_imbalance()
sched: Fix broken setscheduler()
sparc64, sched: Remove unused sparc64_multi_core
sched: Remove unused mc_capable() and smt_capable()
sched/numa: Move task_numa_free() to __put_task_struct()
sched/fair: Fix endless loop in idle_balance()
sched/core: Fix endless loop in pick_next_task()
sched/fair: Push down check for high priority class task into idle_balance()
sched/rt: Fix picking RT and DL tasks from empty queue
trace: Replace hardcoding of 19 with MAX_NICE
sched: Guarantee task priority in pick_next_task()
sched/idle: Remove stale old file
sched: Put rq's sched_avg under CONFIG_FAIR_GROUP_SCHED
cpuidle/arm64: Remove redundant cpuidle_idle_call()
cpuidle/powernv: Remove redundant cpuidle_idle_call()
sched, nohz: Exclude isolated cores from load balancing
sched: Fix select_task_rq_fair() description comments
workqueue: Replace hardcoding of -20 and 19 with MIN_NICE and MAX_NICE
sys: Replace hardcoding of -20 and 19 with MIN_NICE and MAX_NICE
...
Documentation/sysctl/kernel.txt
@@ -442,8 +442,7 @@ feature should be disabled. Otherwise, if the system overhead from the
 feature is too high then the rate the kernel samples for NUMA hinting
 faults may be controlled by the numa_balancing_scan_period_min_ms,
 numa_balancing_scan_delay_ms, numa_balancing_scan_period_max_ms,
-numa_balancing_scan_size_mb, numa_balancing_settle_count sysctls and
-numa_balancing_migrate_deferred.
+numa_balancing_scan_size_mb, and numa_balancing_settle_count sysctls.

 ==============================================================

@@ -484,13 +483,6 @@ rate for each task.
 numa_balancing_scan_size_mb is how many megabytes worth of pages are
 scanned for a given scan.

-numa_balancing_migrate_deferred is how many page migrations get skipped
-unconditionally, after a page migration is skipped because a page is shared
-with other tasks. This reduces page migration overhead, and determines
-how much stronger the "move task near its memory" policy scheduler becomes,
-versus the "move memory near its task" memory management policy, for workloads
-with shared memory.
-
 ==============================================================

 osrelease, ostype & version:
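The remaining knobs are ordinary sysctls, so they can be read or tuned through procfs. A minimal userspace sketch, assuming only the documented sysctl path above (this program is not part of the patch; error handling is abbreviated):

/* Minimal sketch: read one of the NUMA balancing sysctls via procfs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/numa_balancing_scan_period_min_ms", "r");
	long ms;

	if (!f)
		return 1;	/* no NUMA balancing support, or insufficient permission */
	if (fscanf(f, "%ld", &ms) == 1)
		printf("numa_balancing_scan_period_min_ms = %ld\n", ms);
	fclose(f);
	return 0;
}
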
arch/arm/include/asm/topology.h
@@ -20,9 +20,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
 #define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)

-#define mc_capable()	(cpu_topology[0].socket_id != -1)
-#define smt_capable()	(cpu_topology[0].thread_id != -1)
-
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);

arch/arm/kernel/process.c
@@ -30,7 +30,6 @@
 #include <linux/uaccess.h>
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
-#include <linux/cpuidle.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>

@@ -133,7 +132,11 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);

 void (*arm_pm_idle)(void);

-static void default_idle(void)
+/*
+ * Called from the core idle loop.
+ */
+
+void arch_cpu_idle(void)
 {
 	if (arm_pm_idle)
 		arm_pm_idle();
@@ -167,15 +170,6 @@ void arch_cpu_idle_dead(void)
 }
 #endif

-/*
- * Called from the core idle loop.
- */
-void arch_cpu_idle(void)
-{
-	if (cpuidle_idle_call())
-		default_idle();
-}
-
 /*
  * Called by kexec, immediately prior to machine_kexec().
  *

arch/arm64/kernel/process.c
@@ -33,7 +33,6 @@
 #include <linux/kallsyms.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/elfcore.h>
 #include <linux/pm.h>
 #include <linux/tick.h>
@@ -94,10 +93,8 @@ void arch_cpu_idle(void)
 	 * This should do all the clock switching and wait for interrupt
 	 * tricks
 	 */
-	if (cpuidle_idle_call()) {
-		cpu_do_idle();
-		local_irq_enable();
-	}
+	cpu_do_idle();
+	local_irq_enable();
 }

 #ifdef CONFIG_HOTPLUG_CPU

arch/ia64/include/asm/topology.h
@@ -77,7 +77,6 @@ void build_cpu_to_node_map(void);
 #define topology_core_id(cpu)		(cpu_data(cpu)->core_id)
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
-#define smt_capable()			(smp_num_siblings > 1)
 #endif

 extern void arch_fix_phys_package_id(int num, u32 slot);

arch/mips/include/asm/topology.h
@@ -10,8 +10,4 @@

 #include <topology.h>

-#ifdef CONFIG_SMP
-#define smt_capable()	(smp_num_siblings > 1)
-#endif
-
 #endif /* __ASM_TOPOLOGY_H */

arch/powerpc/include/asm/topology.h
@@ -99,7 +99,6 @@ static inline int prrn_is_enabled(void)

 #ifdef CONFIG_SMP
 #include <asm/cputable.h>
-#define smt_capable()		(cpu_has_feature(CPU_FTR_SMT))

 #ifdef CONFIG_PPC64
 #include <asm/smp.h>

arch/powerpc/platforms/cell/spufs/sched.c
@@ -83,7 +83,6 @@ static struct timer_list spuloadavg_timer;
 #define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
 #define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

-#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
 #define SCALE_PRIO(x, prio) \
 	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

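The local MAX_USER_PRIO copy can go because an equivalent definition is now provided by the scheduler's shared headers; SCALE_PRIO() itself is unchanged. It maps a task priority onto an SPU timeslice: better (numerically lower) priority yields a longer slice, clamped below by MIN_SPU_TIMESLICE. A standalone sketch of the arithmetic, assuming HZ=250 and SPUSCHED_TICK=10 (illustrative values, as is the 4x-default seed; MAX_PRIO and MAX_RT_PRIO match the kernel's 140 and 100):

/* Standalone sketch of the spufs timeslice arithmetic (assumed config values). */
#include <stdio.h>

#define HZ		250	/* assumed */
#define SPUSCHED_TICK	10	/* assumed */
#define MAX_RT_PRIO	100
#define MAX_PRIO	140
#define MAX_USER_PRIO	(MAX_PRIO - MAX_RT_PRIO)

#define max(a, b)	((a) > (b) ? (a) : (b))
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

int main(void)
{
	/* Best (100) through worst (139) user priority, seeded with 4x default. */
	for (int prio = MAX_RT_PRIO; prio < MAX_PRIO; prio += 13)
		printf("prio %3d -> %d scheduler ticks\n", prio,
		       SCALE_PRIO(DEF_SPU_TIMESLICE * 4, prio));
	return 0;
}

With these values the slice shrinks from 16 ticks at priority 100 down to the 1-tick floor at priority 139.
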
arch/powerpc/platforms/powernv/setup.c
@@ -26,7 +26,6 @@
 #include <linux/of_fdt.h>
 #include <linux/interrupt.h>
 #include <linux/bug.h>
-#include <linux/cpuidle.h>
 #include <linux/pci.h>

 #include <asm/machdep.h>
@@ -225,16 +224,6 @@ static int __init pnv_probe(void)
 	return 1;
 }

-void powernv_idle(void)
-{
-	/* Hook to cpuidle framework if available, else
-	 * call on default platform idle code
-	 */
-	if (cpuidle_idle_call()) {
-		power7_idle();
-	}
-}
-
 define_machine(powernv) {
 	.name			= "PowerNV",
 	.probe			= pnv_probe,
@@ -244,7 +233,7 @@ define_machine(powernv) {
 	.show_cpuinfo		= pnv_show_cpuinfo,
 	.progress		= pnv_progress,
 	.machine_shutdown	= pnv_shutdown,
-	.power_save		= powernv_idle,
+	.power_save		= power7_idle,
 	.calibrate_decr		= generic_calibrate_decr,
 	.dma_set_mask		= pnv_dma_set_mask,
 #ifdef CONFIG_KEXEC

arch/powerpc/platforms/pseries/setup.c
@@ -39,7 +39,6 @@
 #include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
-#include <linux/cpuidle.h>
 #include <linux/of.h>
 #include <linux/kexec.h>

@@ -356,29 +355,24 @@ early_initcall(alloc_dispatch_log_kmem_cache);

 static void pseries_lpar_idle(void)
 {
-	/* This would call on the cpuidle framework, and the back-end pseries
-	 * driver to go to idle states
+	/*
+	 * Default handler to go into low thread priority and possibly
+	 * low power mode by cedeing processor to hypervisor
 	 */
-	if (cpuidle_idle_call()) {
-		/* On error, execute default handler
-		 * to go into low thread priority and possibly
-		 * low power mode by cedeing processor to hypervisor
-		 */

-		/* Indicate to hypervisor that we are idle. */
-		get_lppaca()->idle = 1;
+	/* Indicate to hypervisor that we are idle. */
+	get_lppaca()->idle = 1;

-		/*
-		 * Yield the processor to the hypervisor. We return if
-		 * an external interrupt occurs (which are driven prior
-		 * to returning here) or if a prod occurs from another
-		 * processor. When returning here, external interrupts
-		 * are enabled.
-		 */
-		cede_processor();
+	/*
+	 * Yield the processor to the hypervisor. We return if
+	 * an external interrupt occurs (which are driven prior
+	 * to returning here) or if a prod occurs from another
+	 * processor. When returning here, external interrupts
+	 * are enabled.
+	 */
+	cede_processor();

-		get_lppaca()->idle = 0;
-	}
+	get_lppaca()->idle = 0;
 }

 /*

arch/sh/kernel/idle.c
@@ -16,7 +16,6 @@
 #include <linux/thread_info.h>
 #include <linux/irqflags.h>
 #include <linux/smp.h>
-#include <linux/cpuidle.h>
 #include <linux/atomic.h>
 #include <asm/pgalloc.h>
 #include <asm/smp.h>
@@ -40,8 +39,7 @@ void arch_cpu_idle_dead(void)

 void arch_cpu_idle(void)
 {
-	if (cpuidle_idle_call())
-		sh_idle();
+	sh_idle();
 }

 void __init select_idle_routine(void)

arch/sparc/include/asm/smp_64.h
@@ -32,7 +32,6 @@

 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
-extern int sparc64_multi_core;

 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

arch/sparc/include/asm/topology_64.h
@@ -42,8 +42,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
-#define mc_capable()				(sparc64_multi_core)
-#define smt_capable()				(sparc64_multi_core)
 #endif /* CONFIG_SMP */

 extern cpumask_t cpu_core_map[NR_CPUS];

arch/sparc/kernel/mdesc.c
@@ -896,10 +896,6 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)

 	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

-#ifdef CONFIG_SMP
-	sparc64_multi_core = 1;
-#endif
-
 	hp = mdesc_grab();

 	set_core_ids(hp);

arch/sparc/kernel/prom_64.c
@@ -555,9 +555,6 @@ static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)

 		cpu_data(cpuid).core_id = portid + 1;
 		cpu_data(cpuid).proc_id = portid;
-#ifdef CONFIG_SMP
-		sparc64_multi_core = 1;
-#endif
 	} else {
 		cpu_data(cpuid).dcache_size =
 			of_getintprop_default(dp, "dcache-size", 16 * 1024);

arch/sparc/kernel/smp_64.c
@@ -53,8 +53,6 @@

 #include "cpumap.h"

-int sparc64_multi_core __read_mostly;
-
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

arch/x86/include/asm/topology.h
@@ -134,12 +134,6 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 struct pci_bus;
 void x86_pci_root_bus_resources(int bus, struct list_head *resources);

-#ifdef CONFIG_SMP
-#define mc_capable()	((boot_cpu_data.x86_max_cores > 1) && \
-			(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
-#define smt_capable()	(smp_num_siblings > 1)
-#endif
-
 #ifdef CONFIG_NUMA
 extern int get_mp_bus_to_node(int busnum);
 extern void set_mp_bus_to_node(int busnum, int node);

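mc_capable() and smt_capable() are removed across these headers because, per the commit list above, nothing calls them anymore. Where similar information is still wanted, it can be derived from the generic per-CPU topology masks; a hedged kernel-style sketch, not code from this series:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* A CPU is SMT-capable if it shares a core with at least one other CPU. */
static inline bool cpu_has_smt_sibling(unsigned int cpu)
{
	return cpumask_weight(topology_thread_cpumask(cpu)) > 1;
}
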
arch/x86/kernel/process.c
@@ -298,10 +298,7 @@ void arch_cpu_idle_dead(void)
  */
 void arch_cpu_idle(void)
 {
-	if (cpuidle_idle_call())
-		x86_idle();
-	else
-		local_irq_enable();
+	x86_idle();
 }

 /*

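Every arch_cpu_idle() above loses its private cpuidle_idle_call() invocation because this series moves that decision into the generic idle loop: the core consults cpuidle first and falls back to the architecture default only when no driver or governor is available. A condensed, hedged sketch of the resulting control flow (the real loop lives in kernel/sched/idle.c and also handles polling, the nohz tick, and RCU):

/* Condensed sketch, not the literal kernel code. */
#include <linux/cpu.h>		/* arch_cpu_idle() */
#include <linux/cpuidle.h>	/* cpuidle_idle_call() */
#include <linux/irqflags.h>	/* local_irq_disable() */
#include <linux/sched.h>	/* need_resched(), schedule_preempt_disabled() */

static void idle_loop_sketch(void)
{
	while (!need_resched()) {
		local_irq_disable();
		if (cpuidle_idle_call())	/* no cpuidle driver/governor? */
			arch_cpu_idle();	/* ...use the arch default idle */
		/* both paths are expected to re-enable interrupts */
	}
	schedule_preempt_disabled();
}
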
drivers/cpuidle/cpuidle-powernv.c
@@ -14,6 +14,7 @@

 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/runlatch.h>

 struct cpuidle_driver powernv_idle_driver = {
 	.name             = "powernv_idle",
@@ -30,12 +31,14 @@ static int snooze_loop(struct cpuidle_device *dev,
 	local_irq_enable();
 	set_thread_flag(TIF_POLLING_NRFLAG);

+	ppc64_runlatch_off();
 	while (!need_resched()) {
 		HMT_low();
 		HMT_very_low();
 	}

 	HMT_medium();
+	ppc64_runlatch_on();
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 	smp_mb();
 	return index;
@@ -45,7 +48,9 @@ static int nap_loop(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
+	ppc64_runlatch_off();
 	power7_idle();
+	ppc64_runlatch_on();
 	return index;
 }

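The added ppc64_runlatch_off()/ppc64_runlatch_on() pairs mark the hardware thread as idle, then busy again, around each idle state entry, so the cpuidle back-ends now manage the run latch themselves. The bracketing pattern, as a hedged sketch:

#include <asm/runlatch.h>

/* Hedged sketch of the bracketing pattern these hunks introduce. */
static void enter_idle_bracketed(void (*enter_state)(void))
{
	ppc64_runlatch_off();	/* advertise the thread as idle */
	enter_state();		/* snooze, nap, or cede, per the chosen state */
	ppc64_runlatch_on();	/* advertise useful work again */
}
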
drivers/cpuidle/cpuidle-pseries.c
@@ -17,6 +17,7 @@
 #include <asm/reg.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/runlatch.h>
 #include <asm/plpar_wrappers.h>

 struct cpuidle_driver pseries_idle_driver = {
@@ -29,6 +30,7 @@ static struct cpuidle_state *cpuidle_state_table;

 static inline void idle_loop_prolog(unsigned long *in_purr)
 {
+	ppc64_runlatch_off();
 	*in_purr = mfspr(SPRN_PURR);
 	/*
 	 * Indicate to the HV that we are idle. Now would be
@@ -45,6 +47,10 @@ static inline void idle_loop_epilog(unsigned long in_purr)
 	wait_cycles += mfspr(SPRN_PURR) - in_purr;
 	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
 	get_lppaca()->idle = 0;
+
+	if (irqs_disabled())
+		local_irq_enable();
+	ppc64_runlatch_on();
 }

 static int snooze_loop(struct cpuidle_device *dev,
Some files were not shown because too many files have changed in this diff.