From cf8b87bbec2c0d4ced949ed74ee34f84338fbd42 Mon Sep 17 00:00:00 2001 From: Liang Chen Date: Thu, 28 Jul 2022 09:35:51 +0800 Subject: [PATCH] sched: optimize prio for kernel RT thread and kworker In some cases, there are too many userspace high-priority RT threads, which cause kernel RT threads or kworkers to be blocked for too long. This config separates kernel and userspace RT threads into two priority regions, priority 0~49 for kernel and priority 50~99 for userspace, so that kernel RT threads always have higher priority than userspace. This config also sets RT policy for kworkers. Change-Id: I87e03915dc0dd03cbcd91d211d2ef56c301451f9 Signed-off-by: Liang Chen --- drivers/soc/rockchip/Kconfig | 10 ++++++++++ kernel/sched/core.c | 8 ++++++++ kernel/workqueue.c | 10 ++++++++++ 3 files changed, 28 insertions(+) diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig index 5ff2a67a76c4..1c784d042e6a 100644 --- a/drivers/soc/rockchip/Kconfig +++ b/drivers/soc/rockchip/Kconfig @@ -91,6 +91,16 @@ config ROCKCHIP_OPP help Say y here to enable rockchip OPP support. +config ROCKCHIP_OPTIMIZE_RT_PRIO + bool "Rockchip optimize prio for kernel RT thread and kworker" + depends on NO_GKI + help + In some cases, there are too many userspace high-priority RT threads, which + cause kernel RT threads or kworkers to be blocked for too long. This config + separates kernel and userspace RT threads into two priority regions, 0~49 for + kernel and 50~99 for userspace, so that kernel RT threads always have higher + priority than userspace. This config also sets RT policy for kworkers. 
+ config ROCKCHIP_PERFORMANCE bool "Rockchip performance configuration support" depends on NO_GKI diff --git a/kernel/sched/core.c b/kernel/sched/core.c index df7bcd1afb78..6ea0e3ceabea 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5738,6 +5738,14 @@ static int _sched_setscheduler(struct task_struct *p, int policy, .sched_nice = PRIO_TO_NICE(p->static_prio), }; + if (IS_ENABLED(CONFIG_ROCKCHIP_OPTIMIZE_RT_PRIO) && + ((policy == SCHED_FIFO) || (policy == SCHED_RR))) { + attr.sched_priority /= 2; + if (!check) + attr.sched_priority += MAX_RT_PRIO / 2; + if (!attr.sched_priority) + attr.sched_priority = 1; + } /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2b9c0cdc5601..50ab893aeaf7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -51,6 +51,7 @@ #include <linux/sched/isolation.h> #include <linux/nmi.h> #include <linux/kvm_para.h> +#include <uapi/linux/sched/types.h> #include "workqueue_internal.h" @@ -1959,6 +1960,15 @@ static struct worker *create_worker(struct worker_pool *pool) goto fail; set_user_nice(worker->task, pool->attrs->nice); + if (IS_ENABLED(CONFIG_ROCKCHIP_OPTIMIZE_RT_PRIO)) { + struct sched_param param; + + if (pool->attrs->nice == 0) + param.sched_priority = MAX_RT_PRIO / 2 - 4; + else + param.sched_priority = MAX_RT_PRIO / 2 - 2; + sched_setscheduler_nocheck(worker->task, SCHED_RR, &param); + } kthread_bind_mask(worker->task, pool->attrs->cpumask); /* successful, attach the worker to the pool */