mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'kthread-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks
Pull kthread updates from Frederic Weisbecker:
"Kthread affinity follows one of 4 existing patterns:
1) Per-CPU kthreads must stay affine to a single CPU and never
execute relevant code on any other CPU. This is currently handled
by smpboot code which takes care of CPU-hotplug operations.
Affinity here is a correctness constraint.
2) Some kthreads _have_ to be affine to a specific set of CPUs and
can't run anywhere else. The affinity is set through
kthread_bind_mask() and the subsystem takes care by itself to
handle CPU-hotplug operations. Affinity here is assumed to be a
correctness constraint.
3) Per-node kthreads _prefer_ to be affine to a specific NUMA node.
This is not a correctness constraint but merely a preference in
terms of memory locality. kswapd and kcompactd both fall into this
category. The affinity is set manually like for any other task and
CPU-hotplug is supposed to be handled by the relevant subsystem so
that the task is properly reaffined whenever a given CPU from the
node comes up. Also care should be taken so that the node affinity
doesn't cross isolated (nohz_full) cpumask boundaries.
4) Similar to the previous point except kthreads have a _preferred_
affinity different than a node. Both RCU boost kthreads and RCU
exp kworkers fall into this category as they refer to "RCU nodes"
from a distinctly distributed tree.
Currently the preferred affinity patterns (3 and 4) have at least 4
identified users, with more or less success when it comes to handling
CPU-hotplug operations and CPU isolation, each of which does it in its
own ad-hoc way.
This is an infrastructure proposal to handle this with the following
API changes:
- kthread_create_on_node() automatically affines the created kthread
to its target node unless it has been set as per-cpu or bound with
kthread_bind[_mask]() before the first wake-up.
- kthread_affine_preferred() is a new function that can be called
right after kthread_create_on_node() to specify a preferred
affinity different than the specified node.
When the preferred affinity can't be applied because the possible
targets are offline or isolated (nohz_full), the kthread is affined to
the housekeeping CPUs (which means all online CPUs most of the time,
or only the non-nohz_full CPUs when nohz_full= is set).
kswapd, kcompactd, RCU boost kthreads and RCU exp kworkers have been
converted, along with a few old drivers.
Summary of the changes:
- Consolidate a bunch of ad-hoc implementations of
kthread_run_on_cpu()
- Introduce task_cpu_fallback_mask() that defines the default last
resort affinity of a task to become nohz_full aware
- Add some correctness check to ensure kthread_bind() is always
called before the first kthread wake up.
- Default affine kthread to its preferred node.
- Convert kswapd / kcompactd and remove their halfway working ad-hoc
affinity implementation
- Implement kthreads preferred affinity
- Unify kthread worker and kthread API's style
- Convert RCU kthreads to the new API and remove the ad-hoc affinity
implementation"
* tag 'kthread-for-6.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks:
kthread: modify kernel-doc function name to match code
rcu: Use kthread preferred affinity for RCU exp kworkers
treewide: Introduce kthread_run_worker[_on_cpu]()
kthread: Unify kthread_create_on_cpu() and kthread_create_worker_on_cpu() automatic format
rcu: Use kthread preferred affinity for RCU boost
kthread: Implement preferred affinity
mm: Create/affine kswapd to its preferred node
mm: Create/affine kcompactd to its preferred node
kthread: Default affine kthread to its preferred NUMA node
kthread: Make sure kthread hasn't started while binding it
sched,arm64: Handle CPU isolation on last resort fallback rq selection
arm64: Exclude nohz_full CPUs from 32bits el0 support
lib: test_objpool: Use kthread_run_on_cpu()
kallsyms: Use kthread_run_on_cpu()
soc/qman: test: Use kthread_run_on_cpu()
arm/bL_switcher: Use kthread_run_on_cpu()
This commit is contained in:
@@ -153,3 +153,11 @@ asymmetric system, a broken guest at EL1 could still attempt to execute
 mode will return to host userspace with an ``exit_reason`` of
 ``KVM_EXIT_FAIL_ENTRY`` and will remain non-runnable until successfully
 re-initialised by a subsequent ``KVM_ARM_VCPU_INIT`` operation.
+
+NOHZ FULL
+---------
+
+To avoid perturbing an adaptive-ticks CPU (specified using
+``nohz_full=``) when a 32-bit task is forcefully migrated, these CPUs
+are treated as 64-bit-only when support for asymmetric 32-bit systems
+is enabled.
@@ -307,13 +307,11 @@ static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
 {
 	struct task_struct *task;
 
-	task = kthread_create_on_node(bL_switcher_thread, arg,
-				      cpu_to_node(cpu), "kswitcher_%d", cpu);
-	if (!IS_ERR(task)) {
-		kthread_bind(task, cpu);
-		wake_up_process(task);
-	} else
+	task = kthread_run_on_cpu(bL_switcher_thread, arg,
+				  cpu, "kswitcher_%d");
+	if (IS_ERR(task))
 		pr_err("%s failed for CPU %d\n", __func__, cpu);
 
 	return task;
 }
|
||||
|
||||
|
||||
@@ -671,6 +671,7 @@ static inline bool supports_clearbhb(int scope)
|
||||
}
|
||||
|
||||
const struct cpumask *system_32bit_el0_cpumask(void);
|
||||
const struct cpumask *fallback_32bit_el0_cpumask(void);
|
||||
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
|
||||
|
||||
static inline bool system_supports_32bit_el0(void)
|
||||
|
||||
@@ -271,18 +271,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 static inline const struct cpumask *
-task_cpu_possible_mask(struct task_struct *p)
+__task_cpu_possible_mask(struct task_struct *p, const struct cpumask *mask)
 {
 	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
-		return cpu_possible_mask;
+		return mask;
 
 	if (!is_compat_thread(task_thread_info(p)))
-		return cpu_possible_mask;
+		return mask;
 
 	return system_32bit_el0_cpumask();
 }
+
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+	return __task_cpu_possible_mask(p, cpu_possible_mask);
+}
 #define task_cpu_possible_mask task_cpu_possible_mask
 
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
+
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/kasan.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/sched/isolation.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cpufeature.h>
|
||||
@@ -1644,6 +1645,11 @@ const struct cpumask *system_32bit_el0_cpumask(void)
|
||||
return cpu_possible_mask;
|
||||
}
|
||||
|
||||
const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
|
||||
{
|
||||
return __task_cpu_possible_mask(p, housekeeping_cpumask(HK_TYPE_TICK));
|
||||
}
|
||||
|
||||
static int __init parse_32bit_el0_param(char *str)
|
||||
{
|
||||
allow_mismatched_32bit_el0 = true;
|
||||
@@ -3773,7 +3779,14 @@ static int enable_mismatched_32bit_el0(unsigned int cpu)
 	static int lucky_winner = -1;
 
 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
-	bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+	bool cpu_32bit = false;
+
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		if (!housekeeping_cpu(cpu, HK_TYPE_TICK))
+			pr_info("Treating adaptive-ticks CPU %u as 64-bit only\n", cpu);
+		else
+			cpu_32bit = true;
+	}
 
 	if (cpu_32bit) {
 		cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
|
||||
|
||||
@@ -681,7 +681,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	pid_nr = pid_vnr(pid);
 	put_pid(pid);
 
-	pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+	pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr);
 	if (IS_ERR(pit->worker))
 		goto fail_kthread;
|
||||
|
||||
|
||||
@@ -517,7 +517,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
|
||||
crypto_init_queue(&engine->queue, qlen);
|
||||
spin_lock_init(&engine->queue_lock);
|
||||
|
||||
engine->kworker = kthread_create_worker(0, "%s", engine->name);
|
||||
engine->kworker = kthread_run_worker(0, "%s", engine->name);
|
||||
if (IS_ERR(engine->kworker)) {
|
||||
dev_err(dev, "failed to create crypto request pump task\n");
|
||||
return NULL;
|
||||
|
||||
@@ -225,7 +225,7 @@ static void __init cppc_freq_invariance_init(void)
|
||||
if (fie_disabled)
|
||||
return;
|
||||
|
||||
kworker_fie = kthread_create_worker(0, "cppc_fie");
|
||||
kworker_fie = kthread_run_worker(0, "cppc_fie");
|
||||
if (IS_ERR(kworker_fie)) {
|
||||
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
|
||||
PTR_ERR(kworker_fie));
|
||||
|
||||
@@ -277,7 +277,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
|
||||
|
||||
INIT_LIST_HEAD(&vblank->pending_work);
|
||||
init_waitqueue_head(&vblank->work_wait_queue);
|
||||
worker = kthread_create_worker(0, "card%d-crtc%d",
|
||||
worker = kthread_run_worker(0, "card%d-crtc%d",
|
||||
vblank->dev->primary->index,
|
||||
vblank->pipe);
|
||||
if (IS_ERR(worker))
|
||||
|
||||
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
|
||||
if (!data[n].ce[0])
|
||||
continue;
|
||||
|
||||
worker = kthread_create_worker(0, "igt/parallel:%s",
|
||||
worker = kthread_run_worker(0, "igt/parallel:%s",
|
||||
data[n].ce[0]->engine->name);
|
||||
if (IS_ERR(worker)) {
|
||||
err = PTR_ERR(worker);
|
||||
|
||||
@@ -3574,7 +3574,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
|
||||
arg[id].batch = NULL;
|
||||
arg[id].count = 0;
|
||||
|
||||
worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
|
||||
worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
|
||||
if (IS_ERR(worker[id])) {
|
||||
err = PTR_ERR(worker[id]);
|
||||
break;
|
||||
|
||||
@@ -1025,7 +1025,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
|
||||
threads[tmp].engine = other;
|
||||
threads[tmp].flags = flags;
|
||||
|
||||
worker = kthread_create_worker(0, "igt/%s",
|
||||
worker = kthread_run_worker(0, "igt/%s",
|
||||
other->name);
|
||||
if (IS_ERR(worker)) {
|
||||
err = PTR_ERR(worker);
|
||||
|
||||
@@ -489,7 +489,7 @@ static int live_slpc_tile_interaction(void *arg)
|
||||
return -ENOMEM;
|
||||
|
||||
for_each_gt(gt, i915, i) {
|
||||
threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
|
||||
threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
|
||||
|
||||
if (IS_ERR(threads[i].worker)) {
|
||||
ret = PTR_ERR(threads[i].worker);
|
||||
|
||||
@@ -492,7 +492,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
|
||||
for (n = 0; n < ncpus; n++) {
|
||||
struct kthread_worker *worker;
|
||||
|
||||
worker = kthread_create_worker(0, "igt/%d", n);
|
||||
worker = kthread_run_worker(0, "igt/%d", n);
|
||||
if (IS_ERR(worker)) {
|
||||
ret = PTR_ERR(worker);
|
||||
ncpus = n;
|
||||
@@ -1645,7 +1645,7 @@ static int live_parallel_engines(void *arg)
|
||||
for_each_uabi_engine(engine, i915) {
|
||||
struct kthread_worker *worker;
|
||||
|
||||
worker = kthread_create_worker(0, "igt/parallel:%s",
|
||||
worker = kthread_run_worker(0, "igt/parallel:%s",
|
||||
engine->name);
|
||||
if (IS_ERR(worker)) {
|
||||
err = PTR_ERR(worker);
|
||||
@@ -1806,7 +1806,7 @@ static int live_breadcrumbs_smoketest(void *arg)
|
||||
unsigned int i = idx * ncpus + n;
|
||||
struct kthread_worker *worker;
|
||||
|
||||
worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
|
||||
worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
|
||||
if (IS_ERR(worker)) {
|
||||
ret = PTR_ERR(worker);
|
||||
goto out_flush;
|
||||
@@ -3219,7 +3219,7 @@ static int perf_parallel_engines(void *arg)
|
||||
|
||||
memset(&engines[idx].p, 0, sizeof(engines[idx].p));
|
||||
|
||||
worker = kthread_create_worker(0, "igt:%s",
|
||||
worker = kthread_run_worker(0, "igt:%s",
|
||||
engine->name);
|
||||
if (IS_ERR(worker)) {
|
||||
err = PTR_ERR(worker);
|
||||
|
||||
@@ -109,7 +109,7 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev)
|
||||
|
||||
mutex_init(&kms->dump_mutex);
|
||||
|
||||
kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
|
||||
kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
|
||||
if (IS_ERR(kms->dump_worker))
|
||||
DRM_ERROR("failed to create disp state task\n");
|
||||
|
||||
|
||||
@@ -115,7 +115,7 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
|
||||
timer->kms = kms;
|
||||
timer->crtc_idx = crtc_idx;
|
||||
|
||||
timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
|
||||
timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
|
||||
if (IS_ERR(timer->worker)) {
|
||||
int ret = PTR_ERR(timer->worker);
|
||||
timer->worker = NULL;
|
||||
|
||||
@@ -859,7 +859,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
|
||||
gpu->funcs = funcs;
|
||||
gpu->name = name;
|
||||
|
||||
gpu->worker = kthread_create_worker(0, "gpu-worker");
|
||||
gpu->worker = kthread_run_worker(0, "gpu-worker");
|
||||
if (IS_ERR(gpu->worker)) {
|
||||
ret = PTR_ERR(gpu->worker);
|
||||
gpu->worker = NULL;
|
||||
|
||||
@@ -268,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
|
||||
/* initialize event thread */
|
||||
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
|
||||
ev_thread->dev = ddev;
|
||||
ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
|
||||
ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
|
||||
if (IS_ERR(ev_thread->worker)) {
|
||||
ret = PTR_ERR(ev_thread->worker);
|
||||
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
|
||||
|
||||
@@ -271,7 +271,7 @@ static int wave5_vpu_probe(struct platform_device *pdev)
|
||||
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
|
||||
hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
|
||||
dev->hrtimer.function = &wave5_vpu_timer_callback;
|
||||
dev->worker = kthread_create_worker(0, "vpu_irq_thread");
|
||||
dev->worker = kthread_run_worker(0, "vpu_irq_thread");
|
||||
if (IS_ERR(dev->worker)) {
|
||||
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
|
||||
ret = PTR_ERR(dev->worker);
|
||||
|
||||
@@ -394,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
|
||||
kthread_init_delayed_work(&chip->irq_poll_work,
|
||||
mv88e6xxx_irq_poll);
|
||||
|
||||
chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
|
||||
chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
|
||||
if (IS_ERR(chip->kworker))
|
||||
return PTR_ERR(chip->kworker);
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user