You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
"The scheduler pull request comes with the following updates:
- Prevent a divide by zero issue by validating the input value of
sysctl_sched_time_avg
- Make task state printing consistent all over the place and have
explicit state characters for IDLE and PARKED so they won't be
displayed as 'D' state, which confuses tools"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/sysctl: Check user input value of sysctl_sched_time_avg
sched/debug: Add explicit TASK_PARKED printing
sched/debug: Ignore TASK_IDLE for SysRq-W
sched/debug: Add explicit TASK_IDLE printing
sched/tracing: Use common task-state helpers
sched/tracing: Fix trace_sched_switch task-state printing
sched/debug: Remove unused variable
sched/debug: Convert TASK_state to hex
sched/debug: Implement consistent task-state printing
This commit is contained in:
+23
-1
@@ -5166,6 +5166,28 @@ void sched_show_task(struct task_struct *p)
|
||||
put_task_stack(p);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
state_filter_match(unsigned long state_filter, struct task_struct *p)
|
||||
{
|
||||
/* no filter, everything matches */
|
||||
if (!state_filter)
|
||||
return true;
|
||||
|
||||
/* filter, but doesn't match */
|
||||
if (!(p->state & state_filter))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
|
||||
* TASK_KILLABLE).
|
||||
*/
|
||||
if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void show_state_filter(unsigned long state_filter)
|
||||
{
|
||||
struct task_struct *g, *p;
|
||||
@@ -5188,7 +5210,7 @@ void show_state_filter(unsigned long state_filter)
|
||||
*/
|
||||
touch_nmi_watchdog();
|
||||
touch_all_softlockup_watchdogs();
|
||||
if (!state_filter || (p->state & state_filter))
|
||||
if (state_filter_match(state_filter, p))
|
||||
sched_show_task(p);
|
||||
}
|
||||
|
||||
|
||||
@@ -466,8 +466,6 @@ static char *task_group_path(struct task_group *tg)
|
||||
}
|
||||
#endif
|
||||
|
||||
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
|
||||
|
||||
static void
|
||||
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
|
||||
+2
-1
@@ -367,7 +367,8 @@ static struct ctl_table kern_table[] = {
|
||||
.data = &sysctl_sched_time_avg,
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = &one,
|
||||
},
|
||||
#ifdef CONFIG_SCHEDSTATS
|
||||
{
|
||||
|
||||
@@ -656,15 +656,6 @@ int trace_print_lat_context(struct trace_iterator *iter)
|
||||
return !trace_seq_has_overflowed(s);
|
||||
}
|
||||
|
||||
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
|
||||
|
||||
static int task_state_char(unsigned long state)
|
||||
{
|
||||
int bit = state ? __ffs(state) + 1 : 0;
|
||||
|
||||
return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
|
||||
}
|
||||
|
||||
/**
|
||||
* ftrace_find_event - find a registered event
|
||||
* @type: the type of event to look for
|
||||
@@ -930,8 +921,8 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
|
||||
|
||||
trace_assign_type(field, iter->ent);
|
||||
|
||||
T = task_state_char(field->next_state);
|
||||
S = task_state_char(field->prev_state);
|
||||
T = __task_state_to_char(field->next_state);
|
||||
S = __task_state_to_char(field->prev_state);
|
||||
trace_find_cmdline(field->next_pid, comm);
|
||||
trace_seq_printf(&iter->seq,
|
||||
" %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
|
||||
@@ -966,8 +957,8 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
|
||||
trace_assign_type(field, iter->ent);
|
||||
|
||||
if (!S)
|
||||
S = task_state_char(field->prev_state);
|
||||
T = task_state_char(field->next_state);
|
||||
S = __task_state_to_char(field->prev_state);
|
||||
T = __task_state_to_char(field->next_state);
|
||||
trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
|
||||
field->prev_pid,
|
||||
field->prev_prio,
|
||||
@@ -1002,8 +993,8 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
|
||||
trace_assign_type(field, iter->ent);
|
||||
|
||||
if (!S)
|
||||
S = task_state_char(field->prev_state);
|
||||
T = task_state_char(field->next_state);
|
||||
S = __task_state_to_char(field->prev_state);
|
||||
T = __task_state_to_char(field->next_state);
|
||||
|
||||
SEQ_PUT_HEX_FIELD(s, field->prev_pid);
|
||||
SEQ_PUT_HEX_FIELD(s, field->prev_prio);
|
||||
|
||||
@@ -397,10 +397,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
|
||||
entry = ring_buffer_event_data(event);
|
||||
entry->prev_pid = prev->pid;
|
||||
entry->prev_prio = prev->prio;
|
||||
entry->prev_state = prev->state;
|
||||
entry->prev_state = __get_task_state(prev);
|
||||
entry->next_pid = next->pid;
|
||||
entry->next_prio = next->prio;
|
||||
entry->next_state = next->state;
|
||||
entry->next_state = __get_task_state(next);
|
||||
entry->next_cpu = task_cpu(next);
|
||||
|
||||
if (!call_filter_check_discard(call, entry, buffer, event))
|
||||
@@ -425,10 +425,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
|
||||
entry = ring_buffer_event_data(event);
|
||||
entry->prev_pid = curr->pid;
|
||||
entry->prev_prio = curr->prio;
|
||||
entry->prev_state = curr->state;
|
||||
entry->prev_state = __get_task_state(curr);
|
||||
entry->next_pid = wakee->pid;
|
||||
entry->next_prio = wakee->prio;
|
||||
entry->next_state = wakee->state;
|
||||
entry->next_state = __get_task_state(wakee);
|
||||
entry->next_cpu = task_cpu(wakee);
|
||||
|
||||
if (!call_filter_check_discard(call, entry, buffer, event))
|
||||
|
||||
Reference in New Issue
Block a user