2007-10-18 23:39:30 -07:00
/*
* Generic process-grouping system.
*
* Based originally on the cpuset system, extracted by Paul Menage
* Copyright (C) 2006 Google, Inc
*
2010-03-10 15:22:20 -08:00
* Notifications support
* Copyright (C) 2009 Nokia Corporation
* Author: Kirill A. Shutemov
*
2007-10-18 23:39:30 -07:00
* Copyright notices from the original cpuset code:
* --------------------------------------------------
* Copyright (C) 2003 BULL SA.
* Copyright (C) 2004-2006 Silicon Graphics, Inc.
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
*
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
* ---------------------------------------------------
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
2014-04-25 18:28:03 -04:00
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2007-10-18 23:39:30 -07:00
# include <linux/cgroup.h>
2011-06-02 21:20:51 +10:00
# include <linux/cred.h>
2009-09-23 15:56:19 -07:00
# include <linux/ctype.h>
2007-10-18 23:39:30 -07:00
# include <linux/errno.h>
2011-06-02 21:20:51 +10:00
# include <linux/init_task.h>
2007-10-18 23:39:30 -07:00
# include <linux/kernel.h>
# include <linux/list.h>
2014-04-26 15:40:28 +08:00
# include <linux/magic.h>
2007-10-18 23:39:30 -07:00
# include <linux/mm.h>
# include <linux/mutex.h>
# include <linux/mount.h>
# include <linux/pagemap.h>
2007-10-18 23:39:35 -07:00
# include <linux/proc_fs.h>
2007-10-18 23:39:30 -07:00
# include <linux/rcupdate.h>
# include <linux/sched.h>
# include <linux/slab.h>
# include <linux/spinlock.h>
2015-09-16 12:53:17 -04:00
# include <linux/percpu-rwsem.h>
2007-10-18 23:39:30 -07:00
# include <linux/string.h>
2007-10-18 23:39:32 -07:00
# include <linux/sort.h>
2007-10-18 23:39:38 -07:00
# include <linux/kmod.h>
2007-10-18 23:39:44 -07:00
# include <linux/delayacct.h>
# include <linux/cgroupstats.h>
2013-01-10 11:49:27 +08:00
# include <linux/hashtable.h>
2009-07-29 15:04:04 -07:00
# include <linux/pid_namespace.h>
2009-09-23 15:56:23 -07:00
# include <linux/idr.h>
2009-09-23 15:56:28 -07:00
# include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
2012-04-21 09:13:46 +02:00
# include <linux/kthread.h>
2014-02-12 09:29:50 -05:00
# include <linux/delay.h>
2011-07-26 16:09:06 -07:00
# include <linux/atomic.h>
2016-01-19 12:18:41 -05:00
# include <linux/cpuset.h>
2016-01-29 02:54:06 -06:00
# include <linux/proc_ns.h>
# include <linux/nsproxy.h>
2016-06-30 10:28:42 -07:00
# include <linux/file.h>
2015-12-07 17:38:53 -05:00
# include <net/sock.h>
2007-10-18 23:39:30 -07:00
2016-08-10 11:23:44 -04:00
# define CREATE_TRACE_POINTS
# include <trace/events/cgroup.h>
2013-11-29 10:42:58 -05:00
/*
* pidlists linger the following amount before being destroyed. The goal
* is avoiding frequent destruction in the middle of consecutive read calls
* Expiring in the middle is a performance problem not a correctness one.
* 1 sec should be enough.
*/
# define CGROUP_PIDLIST_DESTROY_DELAY HZ
2014-02-11 11:52:48 -05:00
# define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
MAX_CFTYPE_NAME + 2)
2011-12-12 18:12:21 -08:00
/*
* cgroup_mutex is the master lock. Any modification to cgroup or its
* hierarchy must be performed while holding it.
*
2015-10-15 16:41:53 -04:00
* css_set_lock protects task->cgroups pointer, the list of css_set
2014-02-25 10:04:03 -05:00
* objects, and the chain of tasks off each css_set.
2011-12-12 18:12:21 -08:00
*
2014-02-25 10:04:03 -05:00
* These locks are exported if CONFIG_PROVE_RCU so that accessors in
* cgroup.h can use them for lockdep annotations.
2011-12-12 18:12:21 -08:00
*/
2013-04-07 09:29:51 -07:00
# ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX ( cgroup_mutex ) ;
2015-10-15 16:41:53 -04:00
DEFINE_SPINLOCK ( css_set_lock ) ;
2014-02-25 10:04:03 -05:00
EXPORT_SYMBOL_GPL ( cgroup_mutex ) ;
2015-10-15 16:41:53 -04:00
EXPORT_SYMBOL_GPL ( css_set_lock ) ;
2013-04-07 09:29:51 -07:00
# else
2007-10-18 23:39:38 -07:00
static DEFINE_MUTEX ( cgroup_mutex ) ;
2015-10-15 16:41:53 -04:00
static DEFINE_SPINLOCK ( css_set_lock ) ;
2013-04-07 09:29:51 -07:00
# endif
2014-05-04 15:09:13 -04:00
/*
2014-05-04 15:09:14 -04:00
* Protects cgroup_idr and css_idr so that IDs can be released without
* grabbing cgroup_mutex.
2014-05-04 15:09:13 -04:00
*/
static DEFINE_SPINLOCK ( cgroup_idr_lock ) ;
2015-11-05 00:12:24 -05:00
/*
* Protects cgroup_file->kn for !self csses. It synchronizes notifications
* against file removal/re-creation across css hiding.
*/
static DEFINE_SPINLOCK ( cgroup_file_kn_lock ) ;
2014-02-08 10:36:58 -05:00
/*
* Protects cgroup_subsys->release_agent_path. Modifying it also requires
* cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
*/
static DEFINE_SPINLOCK ( release_agent_path_lock ) ;
2007-10-18 23:39:38 -07:00
2015-09-16 12:53:17 -04:00
struct percpu_rw_semaphore cgroup_threadgroup_rwsem ;
2014-05-13 12:19:23 -04:00
# define cgroup_assert_mutex_or_rcu_locked() \
2015-06-18 15:50:02 -07:00
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
!lockdep_is_held(&cgroup_mutex), \
2014-05-13 12:19:23 -04:00
"cgroup_mutex or RCU read lock required");
2013-12-06 15:11:56 -05:00
2013-11-22 17:14:39 -05:00
/*
* cgroup destruction makes heavy use of work items and there can be a lot
* of concurrent destructions. Use a separate workqueue so that cgroup
* destruction work items don't end up filling up max_active of system_wq
* which may lead to deadlock.
*/
static struct workqueue_struct * cgroup_destroy_wq ;
2013-11-29 10:42:58 -05:00
/*
* pidlist destructions need to be flushed on cgroup destruction. Use a
* separate workqueue as flush domain.
*/
static struct workqueue_struct * cgroup_pidlist_destroy_wq ;
2014-02-08 10:36:58 -05:00
/* generate an array of cgroup subsystem pointers */
2014-02-08 10:36:58 -05:00
# define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
2014-02-08 10:36:58 -05:00
static struct cgroup_subsys * cgroup_subsys [ ] = {
2007-10-18 23:39:30 -07:00
# include <linux/cgroup_subsys.h>
} ;
2014-02-08 10:36:58 -05:00
# undef SUBSYS
/* array of cgroup subsystem names */
# define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char * cgroup_subsys_name [ ] = {
# include <linux/cgroup_subsys.h>
} ;
# undef SUBSYS
2007-10-18 23:39:30 -07:00
2015-09-18 11:56:28 -04:00
/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
# define SUBSYS(_x) \
DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \
DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \
EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \
EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
# include <linux/cgroup_subsys.h>
# undef SUBSYS
# define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true * cgroup_subsys_enabled_key [ ] = {
# include <linux/cgroup_subsys.h>
} ;
# undef SUBSYS
# define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true * cgroup_subsys_on_dfl_key [ ] = {
# include <linux/cgroup_subsys.h>
} ;
# undef SUBSYS
2007-10-18 23:39:30 -07:00
/*
2014-03-19 10:23:54 -04:00
* The default hierarchy, reserved for the subsystems that are otherwise
2013-06-24 15:21:47 -07:00
* unattached - it never has more than a single cgroup, and all tasks are
* part of that cgroup.
2007-10-18 23:39:30 -07:00
*/
2014-03-19 10:23:55 -04:00
struct cgroup_root cgrp_dfl_root ;
2015-08-05 16:03:19 -04:00
EXPORT_SYMBOL_GPL ( cgrp_dfl_root ) ;
2013-06-24 15:21:47 -07:00
2014-03-19 10:23:55 -04:00
/*
* The default hierarchy always exists but is hidden until mounted for the
* first time. This is for backward compatibility.
*/
2016-02-23 10:00:50 -05:00
static bool cgrp_dfl_visible ;
2007-10-18 23:39:30 -07:00
2016-02-11 13:34:49 -05:00
/* Controllers blocked by the commandline in v1 */
2016-02-22 22:25:47 -05:00
static u16 cgroup_no_v1_mask ;
2016-02-11 13:34:49 -05:00
2014-05-14 19:33:07 -04:00
/* some controllers are not supported in the default hierarchy */
2016-02-23 10:00:50 -05:00
static u16 cgrp_dfl_inhibit_ss_mask ;
2014-05-14 19:33:07 -04:00
2016-03-08 11:51:26 -05:00
/* some controllers are implicitly enabled on the default hierarchy */
static unsigned long cgrp_dfl_implicit_ss_mask ;
2007-10-18 23:39:30 -07:00
/* The list of hierarchy roots */
2013-06-24 15:21:47 -07:00
static LIST_HEAD ( cgroup_roots ) ;
static int cgroup_root_count ;
2007-10-18 23:39:30 -07:00
2014-02-08 10:37:01 -05:00
/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
2013-04-14 11:36:58 -07:00
static DEFINE_IDR ( cgroup_hierarchy_idr ) ;
2009-09-23 15:56:23 -07:00
2013-06-18 18:53:53 +08:00
/*
2014-05-16 13:22:49 -04:00
* Assign a monotonically increasing serial number to csses. It guarantees
* cgroups with bigger numbers are newer than those with smaller numbers.
* Also, as csses are always appended to the parent's ->children list, it
* guarantees that sibling csses are always sorted in the ascending serial
* number order on the list. Protected by cgroup_mutex.
2013-06-18 18:53:53 +08:00
*/
2014-05-16 13:22:49 -04:00
static u64 css_serial_nr_next = 1 ;
2013-06-18 18:53:53 +08:00
2015-06-06 10:02:14 +10:00
/*
* These bitmask flags indicate whether tasks in the fork and exit paths have
* fork/exit handlers to call. This avoids us having to do extra work in the
* fork/exit path to check which subsystems have fork/exit callbacks.
2007-10-18 23:39:30 -07:00
*/
2016-02-22 22:25:47 -05:00
static u16 have_fork_callback __read_mostly ;
static u16 have_exit_callback __read_mostly ;
static u16 have_free_callback __read_mostly ;
2007-10-18 23:39:30 -07:00
2016-01-29 02:54:06 -06:00
/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
. count = { . counter = 2 , } ,
. user_ns = & init_user_ns ,
. ns . ops = & cgroupns_operations ,
. ns . inum = PROC_CGROUP_INIT_INO ,
. root_cset = & init_css_set ,
} ;
2015-06-09 21:32:09 +10:00
/* Ditto for the can_fork callback. */
2016-02-22 22:25:47 -05:00
static u16 have_canfork_callback __read_mostly ;
2015-06-09 21:32:09 +10:00
2015-11-16 11:13:34 -05:00
static struct file_system_type cgroup2_fs_type ;
2014-07-15 11:05:09 -04:00
static struct cftype cgroup_dfl_base_files [ ] ;
static struct cftype cgroup_legacy_base_files [ ] ;
2013-06-28 16:24:11 -07:00
2016-02-22 22:25:47 -05:00
static int rebind_subsystems ( struct cgroup_root * dst_root , u16 ss_mask ) ;
2016-03-03 09:58:00 -05:00
static void cgroup_lock_and_drain_offline ( struct cgroup * cgrp ) ;
2016-03-03 09:58:01 -05:00
static int cgroup_apply_control ( struct cgroup * cgrp ) ;
static void cgroup_finalize_control ( struct cgroup * cgrp , int ret ) ;
2015-10-15 16:41:52 -04:00
static void css_task_iter_advance ( struct css_task_iter * it ) ;
2012-11-19 08:13:37 -08:00
static int cgroup_destroy_locked ( struct cgroup * cgrp ) ;
2016-03-03 09:57:58 -05:00
static struct cgroup_subsys_state * css_create ( struct cgroup * cgrp ,
struct cgroup_subsys * ss ) ;
2014-05-14 09:15:02 -04:00
static void css_release ( struct percpu_ref * ref ) ;
2014-04-23 11:13:16 -04:00
static void kill_css ( struct cgroup_subsys_state * css ) ;
2015-09-18 17:54:23 -04:00
static int cgroup_addrm_files ( struct cgroup_subsys_state * css ,
struct cgroup * cgrp , struct cftype cfts [ ] ,
2013-08-08 20:11:23 -04:00
bool is_add ) ;
2012-11-19 08:13:37 -08:00
2015-09-18 11:56:28 -04:00
/**
* cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
* @ssid: subsys ID of interest
*
* cgroup_subsys_enabled() can only be used with literal subsys names which
* is fine for individual subsystems but unsuitable for cgroup core. This
* is slower static_key_enabled() based test indexed by @ssid.
*/
static bool cgroup_ssid_enabled ( int ssid )
{
2016-03-15 00:21:06 +01:00
if ( CGROUP_SUBSYS_COUNT = = 0 )
return false ;
2015-09-18 11:56:28 -04:00
return static_key_enabled ( cgroup_subsys_enabled_key [ ssid ] ) ;
}
2016-02-11 13:34:49 -05:00
static bool cgroup_ssid_no_v1 ( int ssid )
{
return cgroup_no_v1_mask & ( 1 < < ssid ) ;
}
2015-09-18 11:56:28 -04:00
/**
* cgroup_on_dfl - test whether a cgroup is on the default hierarchy
* @cgrp: the cgroup of interest
*
* The default hierarchy is the v2 interface of cgroup and this function
* can be used to test whether a cgroup is on the default hierarchy for
* cases where a subsystem should behave differnetly depending on the
* interface version.
*
* The set of behaviors which change on the default hierarchy are still
* being determined and the mount option is prefixed with __DEVEL__.
*
* List of changed behaviors:
*
* - Mount options "noprefix", "xattr", "clone_children", "release_agent"
* and "name" are disallowed.
*
* - When mounting an existing superblock, mount options should match.
*
* - Remount is disallowed.
*
* - rename(2) is disallowed.
*
* - "tasks" is removed. Everything should be at process granularity. Use
* "cgroup.procs" instead.
*
* - "cgroup.procs" is not sorted. pids will be unique unless they got
* recycled inbetween reads.
*
* - "release_agent" and "notify_on_release" are removed. Replacement
* notification mechanism will be implemented.
*
* - "cgroup.clone_children" is removed.
*
* - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
* and its descendants contain no task; otherwise, 1. The file also
* generates kernfs notification which can be monitored through poll and
* [di]notify when the value of the file changes.
*
* - cpuset: tasks will be kept in empty cpusets when hotplug happens and
* take masks of ancestors with non-empty cpus/mems, instead of being
* moved to an ancestor.
*
* - cpuset: a task can be moved into an empty cpuset, and again it takes
* masks of ancestors.
*
* - memcg: use_hierarchy is on by default and the cgroup file for the flag
* is not created.
*
* - blkcg: blk-throttle becomes properly hierarchical.
*
* - debug: disallowed on the default hierarchy.
*/
static bool cgroup_on_dfl ( const struct cgroup * cgrp )
{
return cgrp - > root = = & cgrp_dfl_root ;
}
2014-05-04 15:09:13 -04:00
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc ( struct idr * idr , void * ptr , int start , int end ,
gfp_t gfp_mask )
{
int ret ;
idr_preload ( gfp_mask ) ;
2014-05-13 12:10:59 -04:00
spin_lock_bh ( & cgroup_idr_lock ) ;
2015-11-06 16:28:21 -08:00
ret = idr_alloc ( idr , ptr , start , end , gfp_mask & ~ __GFP_DIRECT_RECLAIM ) ;
2014-05-13 12:10:59 -04:00
spin_unlock_bh ( & cgroup_idr_lock ) ;
2014-05-04 15:09:13 -04:00
idr_preload_end ( ) ;
return ret ;
}
static void * cgroup_idr_replace ( struct idr * idr , void * ptr , int id )
{
void * ret ;
2014-05-13 12:10:59 -04:00
spin_lock_bh ( & cgroup_idr_lock ) ;
2014-05-04 15:09:13 -04:00
ret = idr_replace ( idr , ptr , id ) ;
2014-05-13 12:10:59 -04:00
spin_unlock_bh ( & cgroup_idr_lock ) ;
2014-05-04 15:09:13 -04:00
return ret ;
}
static void cgroup_idr_remove ( struct idr * idr , int id )
{
2014-05-13 12:10:59 -04:00
spin_lock_bh ( & cgroup_idr_lock ) ;
2014-05-04 15:09:13 -04:00
idr_remove ( idr , id ) ;
2014-05-13 12:10:59 -04:00
spin_unlock_bh ( & cgroup_idr_lock ) ;
2014-05-04 15:09:13 -04:00
}
2014-05-16 13:22:48 -04:00
static struct cgroup * cgroup_parent ( struct cgroup * cgrp )
{
struct cgroup_subsys_state * parent_css = cgrp - > self . parent ;
if ( parent_css )
return container_of ( parent_css , struct cgroup , self ) ;
return NULL ;
}
2016-03-03 09:57:58 -05:00
/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control ( struct cgroup * cgrp )
{
struct cgroup * parent = cgroup_parent ( cgrp ) ;
u16 root_ss_mask = cgrp - > root - > subsys_mask ;
if ( parent )
return parent - > subtree_control ;
if ( cgroup_on_dfl ( cgrp ) )
2016-03-08 11:51:26 -05:00
root_ss_mask & = ~ ( cgrp_dfl_inhibit_ss_mask |
cgrp_dfl_implicit_ss_mask ) ;
2016-03-03 09:57:58 -05:00
return root_ss_mask ;
}
/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask ( struct cgroup * cgrp )
{
struct cgroup * parent = cgroup_parent ( cgrp ) ;
if ( parent )
return parent - > subtree_ss_mask ;
return cgrp - > root - > subsys_mask ;
}
2013-08-08 20:11:27 -04:00
/**
* cgroup_css - obtain a cgroup's css for the specified subsystem
* @cgrp: the cgroup of interest
2014-05-14 09:15:00 -04:00
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
2013-08-08 20:11:27 -04:00
*
2013-08-26 18:40:56 -04:00
* Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
* function must be called either under cgroup_mutex or rcu_read_lock() and
* the caller is responsible for pinning the returned css if it wants to
* keep accessing it outside the said locks. This function may return
* %NULL if @cgrp doesn't have @subsys_id enabled.
2013-08-08 20:11:27 -04:00
*/
static struct cgroup_subsys_state * cgroup_css ( struct cgroup * cgrp ,
2013-08-26 18:40:56 -04:00
struct cgroup_subsys * ss )
2013-08-08 20:11:27 -04:00
{
2013-08-26 18:40:56 -04:00
if ( ss )
2014-02-08 10:36:58 -05:00
return rcu_dereference_check ( cgrp - > subsys [ ss - > id ] ,
2014-02-11 11:52:47 -05:00
lockdep_is_held ( & cgroup_mutex ) ) ;
2013-08-26 18:40:56 -04:00
else
2014-05-14 09:15:00 -04:00
return & cgrp - > self ;
2013-08-08 20:11:27 -04:00
}
2012-11-19 08:13:37 -08:00
2014-04-23 11:13:14 -04:00
/**
* cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
2014-05-14 09:15:00 -04:00
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
2014-04-23 11:13:14 -04:00
*
2015-04-23 07:57:33 -04:00
* Similar to cgroup_css() but returns the effective css, which is defined
2014-04-23 11:13:14 -04:00
* as the matching css of the nearest ancestor including self which has @ss
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
static struct cgroup_subsys_state * cgroup_e_css ( struct cgroup * cgrp ,
struct cgroup_subsys * ss )
{
lockdep_assert_held ( & cgroup_mutex ) ;
if ( ! ss )
2014-05-14 09:15:00 -04:00
return & cgrp - > self ;
2014-04-23 11:13:14 -04:00
2014-11-18 02:49:52 -05:00
/*
* This function is used while updating css associations and thus
2016-03-03 09:57:58 -05:00
* can't test the csses directly. Test ss_mask.
2014-11-18 02:49:52 -05:00
*/
2016-03-03 09:57:58 -05:00
while ( ! ( cgroup_ss_mask ( cgrp ) & ( 1 < < ss - > id ) ) ) {
2014-05-16 13:22:48 -04:00
cgrp = cgroup_parent ( cgrp ) ;
2016-03-03 09:57:58 -05:00
if ( ! cgrp )
return NULL ;
}
2014-04-23 11:13:14 -04:00
return cgroup_css ( cgrp , ss ) ;
2013-08-08 20:11:27 -04:00
}
2012-11-19 08:13:37 -08:00
2014-11-18 02:49:52 -05:00
/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
*
* Find and get the effective css of @cgrp for @ss. The effective css is
* defined as the matching css of the nearest ancestor including self which
* has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
* the root css is returned, so this function always returns a valid css.
* The returned css must be put using css_put().
*/
struct cgroup_subsys_state * cgroup_get_e_css ( struct cgroup * cgrp ,
struct cgroup_subsys * ss )
{
struct cgroup_subsys_state * css ;
rcu_read_lock ( ) ;
do {
css = cgroup_css ( cgrp , ss ) ;
if ( css & & css_tryget_online ( css ) )
goto out_unlock ;
cgrp = cgroup_parent ( cgrp ) ;
} while ( cgrp ) ;
css = init_css_set . subsys [ ss - > id ] ;
css_get ( css ) ;
out_unlock :
rcu_read_unlock ( ) ;
return css ;
}
2007-10-18 23:39:30 -07:00
/* convenient tests for these bits */
2013-06-12 21:04:53 -07:00
static inline bool cgroup_is_dead ( const struct cgroup * cgrp )
2007-10-18 23:39:30 -07:00
{
2014-05-16 13:22:51 -04:00
return ! ( cgrp - > self . flags & CSS_ONLINE ) ;
2007-10-18 23:39:30 -07:00
}
2015-10-15 16:41:50 -04:00
static void cgroup_get ( struct cgroup * cgrp )
{
WARN_ON_ONCE ( cgroup_is_dead ( cgrp ) ) ;
css_get ( & cgrp - > self ) ;
}
static bool cgroup_tryget ( struct cgroup * cgrp )
{
return css_tryget ( & cgrp - > self ) ;
}
2014-05-13 12:16:21 -04:00
struct cgroup_subsys_state * of_css ( struct kernfs_open_file * of )
2014-02-11 11:52:49 -05:00
{
2014-02-11 11:52:49 -05:00
struct cgroup * cgrp = of - > kn - > parent - > priv ;
2014-05-13 12:16:21 -04:00
struct cftype * cft = of_cft ( of ) ;
2014-02-11 11:52:49 -05:00
/*
* This is open and unprotected implementation of cgroup_css().
* seq_css() is only called from a kernfs file operation which has
* an active reference on the file. Because all the subsystem
* files are drained before a css is disassociated with a cgroup,
* the matching css from the cgroup's subsys table is guaranteed to
* be and stay valid until the enclosing operation is complete.
*/
if ( cft - > ss )
return rcu_dereference_raw ( cgrp - > subsys [ cft - > ss - > id ] ) ;
else
2014-05-14 09:15:00 -04:00
return & cgrp - > self ;
2014-02-11 11:52:49 -05:00
}
2014-05-13 12:16:21 -04:00
EXPORT_SYMBOL_GPL ( of_css ) ;
2014-02-11 11:52:49 -05:00
2008-02-07 00:13:46 -08:00
static int notify_on_release ( const struct cgroup * cgrp )
2007-10-18 23:39:38 -07:00
{
2007-10-18 23:40:44 -07:00
return test_bit ( CGRP_NOTIFY_ON_RELEASE , & cgrp - > flags ) ;
2007-10-18 23:39:38 -07:00
}
2013-06-25 11:53:37 -07:00
/**
2013-12-06 15:11:56 -05:00
* for_each_css - iterate all css's of a cgroup
* @css: the iteration cursor
* @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
* @cgrp: the target cgroup to iterate css's of
2013-06-25 11:53:37 -07:00
*
2014-04-23 11:13:14 -04:00
* Should be called under cgroup_[tree_]mutex.
2013-06-25 11:53:37 -07:00
*/
2013-12-06 15:11:56 -05:00
# define for_each_css(css, ssid, cgrp) \
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
if (!((css) = rcu_dereference_check( \
(cgrp)->subsys[(ssid)], \
lockdep_is_held(&cgroup_mutex)))) { } \
else
2014-04-23 11:13:14 -04:00
/**
* for_each_e_css - iterate all effective css's of a cgroup
* @css: the iteration cursor
* @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
* @cgrp: the target cgroup to iterate css's of
*
* Should be called under cgroup_[tree_]mutex.
*/
# define for_each_e_css(css, ssid, cgrp) \
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
; \
else
2013-06-25 11:53:37 -07:00
/**
2014-02-08 10:36:58 -05:00
* for_each_subsys - iterate all enabled cgroup subsystems
2013-06-25 11:53:37 -07:00
* @ss: the iteration cursor
2013-12-06 15:11:56 -05:00
* @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
2013-06-25 11:53:37 -07:00
*/
2013-12-06 15:11:56 -05:00
# define for_each_subsys(ss, ssid) \
2014-02-08 10:36:58 -05:00
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \
(((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
2013-06-25 11:53:37 -07:00
2015-06-06 10:02:14 +10:00
/**
2016-02-22 22:25:46 -05:00
* do_each_subsys_mask - filter for_each_subsys with a bitmask
2015-06-06 10:02:14 +10:00
* @ss: the iteration cursor
* @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
2016-02-22 22:25:46 -05:00
* @ss_mask: the bitmask
2015-06-06 10:02:14 +10:00
*
* The block will only run for cases where the ssid-th bit (1 << ssid) of
2016-02-22 22:25:46 -05:00
* @ss_mask is set.
2015-06-06 10:02:14 +10:00
*/
2016-02-22 22:25:46 -05:00
# define do_each_subsys_mask(ss, ssid, ss_mask) do { \
unsigned long __ss_mask = (ss_mask); \
if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */ \
2015-06-09 21:32:07 +10:00
(ssid) = 0; \
2016-02-22 22:25:46 -05:00
break; \
} \
for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) { \
(ss) = cgroup_subsys[ssid]; \
{
# define while_each_subsys_mask() \
} \
} \
} while (false)
2015-06-06 10:02:14 +10:00
2014-03-19 10:23:53 -04:00
/* iterate across the hierarchies */
# define for_each_root(root) \
2013-06-24 15:21:48 -07:00
list_for_each_entry((root), &cgroup_roots, root_list)
2007-10-18 23:39:30 -07:00
2014-04-23 11:13:16 -04:00
/* iterate over child cgrps, lock should be held throughout iteration */
# define cgroup_for_each_live_child(child, cgrp) \
2014-05-16 13:22:48 -04:00
list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
2014-05-13 12:19:23 -04:00
if (({ lockdep_assert_held(&cgroup_mutex); \
2014-04-23 11:13:16 -04:00
cgroup_is_dead(child); })) \
; \
else
2013-04-07 09:29:51 -07:00
2016-03-03 09:57:59 -05:00
/* walk live descendants in preorder */
# define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) \
css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL)) \
if (({ lockdep_assert_held(&cgroup_mutex); \
(dsct) = (d_css)->cgroup; \
cgroup_is_dead(dsct); })) \
; \
else
/* walk live descendants in postorder */
# define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) \
css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
if (({ lockdep_assert_held(&cgroup_mutex); \
(dsct) = (d_css)->cgroup; \
cgroup_is_dead(dsct); })) \
; \
else
2007-10-18 23:39:38 -07:00
static void cgroup_release_agent ( struct work_struct * work ) ;
2007-10-18 23:40:44 -07:00
static void check_for_release ( struct cgroup * cgrp ) ;
2007-10-18 23:39:38 -07:00
2013-06-12 21:04:50 -07:00
/*
* A cgroup can be associated with multiple css_sets as different tasks may
* belong to different cgroups on different hierarchies. In the other
* direction, a css_set is naturally associated with multiple cgroups.
* This M:N relationship is represented by the following link structure
* which exists for each association and allows traversing the associations
* from both sides.
*/
struct cgrp_cset_link {
/* the cgroup and css_set this link associates */
struct cgroup * cgrp ;
struct css_set * cset ;
/* list of cgrp_cset_links anchored at cgrp->cset_links */
struct list_head cset_link ;
/* list of cgrp_cset_links anchored at css_set->cgrp_links */
struct list_head cgrp_link ;
2007-10-18 23:39:36 -07:00
} ;
2014-03-19 10:23:53 -04:00
/*
* The default css_set - used by init and its children prior to any
2007-10-18 23:39:36 -07:00
* hierarchies being mounted. It contains a pointer to the root state
* for each subsystem. Also used to anchor the list of css_sets. Not
* reference-counted, to improve performance when child cgroups
* haven't been created.
*/
2014-05-07 21:31:17 -04:00
struct css_set init_css_set = {
2014-03-19 10:23:53 -04:00
. refcount = ATOMIC_INIT ( 1 ) ,
. cgrp_links = LIST_HEAD_INIT ( init_css_set . cgrp_links ) ,
. tasks = LIST_HEAD_INIT ( init_css_set . tasks ) ,
. mg_tasks = LIST_HEAD_INIT ( init_css_set . mg_tasks ) ,
. mg_preload_node = LIST_HEAD_INIT ( init_css_set . mg_preload_node ) ,
. mg_node = LIST_HEAD_INIT ( init_css_set . mg_node ) ,
2015-10-15 16:41:52 -04:00
. task_iters = LIST_HEAD_INIT ( init_css_set . task_iters ) ,
2014-03-19 10:23:53 -04:00
} ;
2007-10-18 23:39:36 -07:00
2014-03-19 10:23:53 -04:00
static int css_set_count = 1 ; /* 1 for init_css_set */
2007-10-18 23:39:36 -07:00
2015-10-15 16:41:49 -04:00
/**
* css_set_populated - does a css_set contain any tasks?
* @cset: target css_set
*/
static bool css_set_populated ( struct css_set * cset )
{
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2015-10-15 16:41:49 -04:00
return ! list_empty ( & cset - > tasks ) | | ! list_empty ( & cset - > mg_tasks ) ;
}
2014-04-25 18:28:02 -04:00
/**
* cgroup_update_populated - updated populated count of a cgroup
* @cgrp: the target cgroup
* @populated: inc or dec populated count
*
2015-10-15 16:41:49 -04:00
* One of the css_sets associated with @cgrp is either getting its first
* task or losing the last. Update @cgrp->populated_cnt accordingly. The
* count is propagated towards root so that a given cgroup's populated_cnt
* is zero iff the cgroup and all its descendants don't contain any tasks.
2014-04-25 18:28:02 -04:00
*
* @cgrp's interface file "cgroup.populated" is zero if
* @cgrp->populated_cnt is zero and 1 otherwise. When @cgrp->populated_cnt
* changes from or to zero, userland is notified that the content of the
* interface file has changed. This can be used to detect when @cgrp and
* its descendants become populated or empty.
*/
static void cgroup_update_populated ( struct cgroup * cgrp , bool populated )
{
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2014-04-25 18:28:02 -04:00
do {
bool trigger ;
if ( populated )
trigger = ! cgrp - > populated_cnt + + ;
else
trigger = ! - - cgrp - > populated_cnt ;
if ( ! trigger )
break ;
2015-10-15 16:41:50 -04:00
check_for_release ( cgrp ) ;
2015-09-18 17:54:23 -04:00
cgroup_file_notify ( & cgrp - > events_file ) ;
2014-05-16 13:22:48 -04:00
cgrp = cgroup_parent ( cgrp ) ;
2014-04-25 18:28:02 -04:00
} while ( cgrp ) ;
}
2015-10-15 16:41:49 -04:00
/**
* css_set_update_populated - update populated state of a css_set
* @cset: target css_set
* @populated: whether @cset is populated or depopulated
*
* @cset is either getting the first task or losing the last. Update the
* ->populated_cnt of all associated cgroups accordingly.
*/
static void css_set_update_populated ( struct css_set * cset , bool populated )
{
struct cgrp_cset_link * link ;
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2015-10-15 16:41:49 -04:00
list_for_each_entry ( link , & cset - > cgrp_links , cgrp_link )
cgroup_update_populated ( link - > cgrp , populated ) ;
}
2015-10-15 16:41:52 -04:00
/**
* css_set_move_task - move a task from one css_set to another
* @task: task being moved
* @from_cset: css_set @task currently belongs to (may be NULL)
* @to_cset: new css_set @task is being moved to (may be NULL)
* @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
*
* Move @task from @from_cset to @to_cset. If @task didn't belong to any
* css_set, @from_cset can be NULL. If @task is being disassociated
* instead of moved, @to_cset can be NULL.
*
2015-10-15 16:41:52 -04:00
* This function automatically handles populated_cnt updates and
* css_task_iter adjustments but the caller is responsible for managing
* @from_cset and @to_cset's reference counts.
2015-10-15 16:41:52 -04:00
*/
static void css_set_move_task ( struct task_struct * task ,
struct css_set * from_cset , struct css_set * to_cset ,
bool use_mg_tasks )
{
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
2016-03-03 09:57:57 -05:00
if ( to_cset & & ! css_set_populated ( to_cset ) )
css_set_update_populated ( to_cset , true ) ;
2015-10-15 16:41:52 -04:00
if ( from_cset ) {
2015-10-15 16:41:52 -04:00
struct css_task_iter * it , * pos ;
2015-10-15 16:41:52 -04:00
WARN_ON_ONCE ( list_empty ( & task - > cg_list ) ) ;
2015-10-15 16:41:52 -04:00
/*
* @task is leaving, advance task iterators which are
* pointing to it so that they can resume at the next
* position. Advancing an iterator might remove it from
* the list, use safe walk. See css_task_iter_advance*()
* for details.
*/
list_for_each_entry_safe ( it , pos , & from_cset - > task_iters ,
iters_node )
if ( it - > task_pos = = & task - > cg_list )
css_task_iter_advance ( it ) ;
2015-10-15 16:41:52 -04:00
list_del_init ( & task - > cg_list ) ;
if ( ! css_set_populated ( from_cset ) )
css_set_update_populated ( from_cset , false ) ;
} else {
WARN_ON_ONCE ( ! list_empty ( & task - > cg_list ) ) ;
}
if ( to_cset ) {
/*
* We are synchronized through cgroup_threadgroup_rwsem
* against PF_EXITING setting such that we can't race
* against cgroup_exit() changing the css_set to
* init_css_set and dropping the old one.
*/
WARN_ON_ONCE ( task - > flags & PF_EXITING ) ;
rcu_assign_pointer ( task - > cgroups , to_cset ) ;
list_add_tail ( & task - > cg_list , use_mg_tasks ? & to_cset - > mg_tasks :
& to_cset - > tasks ) ;
}
}
2009-09-23 15:56:22 -07:00
/*
* hash table for cgroup groups. This improves the performance to find
* an existing css_set. This hash doesn't (currently) take into
* account cgroups in empty hierarchies.
*/
2008-04-29 01:00:11 -07:00
# define CSS_SET_HASH_BITS 7
2013-01-10 11:49:27 +08:00
static DEFINE_HASHTABLE ( css_set_table , CSS_SET_HASH_BITS ) ;
2008-04-29 01:00:11 -07:00
2013-01-10 11:49:27 +08:00
static unsigned long css_set_hash ( struct cgroup_subsys_state * css [ ] )
2008-04-29 01:00:11 -07:00
{
2013-01-10 11:49:27 +08:00
unsigned long key = 0UL ;
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
int i ;
2008-04-29 01:00:11 -07:00
2013-06-25 11:53:37 -07:00
for_each_subsys ( ss , i )
2013-01-10 11:49:27 +08:00
key + = ( unsigned long ) css [ i ] ;
key = ( key > > 16 ) ^ key ;
2008-04-29 01:00:11 -07:00
2013-01-10 11:49:27 +08:00
return key ;
2008-04-29 01:00:11 -07:00
}
2014-09-19 16:51:00 +08:00
static void put_css_set_locked ( struct css_set * cset )
2007-10-18 23:39:33 -07:00
{
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link , * tmp_link ;
2014-04-23 11:13:15 -04:00
struct cgroup_subsys * ss ;
int ssid ;
2013-06-12 21:04:49 -07:00
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2014-02-13 06:58:40 -05:00
if ( ! atomic_dec_and_test ( & cset - > refcount ) )
2008-10-18 20:28:03 -07:00
return ;
2007-10-18 23:39:38 -07:00
2015-11-23 14:55:41 -05:00
/* This css_set is dead. unlink it and release cgroup and css refs */
for_each_subsys ( ss , ssid ) {
2014-04-23 11:13:15 -04:00
list_del ( & cset - > e_cset_node [ ssid ] ) ;
2015-11-23 14:55:41 -05:00
css_put ( cset - > subsys [ ssid ] ) ;
}
2013-06-12 21:04:49 -07:00
hash_del ( & cset - > hlist ) ;
2009-09-23 15:56:23 -07:00
css_set_count - - ;
2013-06-12 21:04:50 -07:00
list_for_each_entry_safe ( link , tmp_link , & cset - > cgrp_links , cgrp_link ) {
list_del ( & link - > cset_link ) ;
list_del ( & link - > cgrp_link ) ;
2015-10-15 16:41:51 -04:00
if ( cgroup_parent ( link - > cgrp ) )
cgroup_put ( link - > cgrp ) ;
2009-09-23 15:56:23 -07:00
kfree ( link ) ;
2007-10-18 23:39:38 -07:00
}
2009-09-23 15:56:23 -07:00
2013-06-12 21:04:49 -07:00
kfree_rcu ( cset , rcu_head ) ;
2007-10-18 23:39:36 -07:00
}
2014-09-19 16:51:00 +08:00
static void put_css_set ( struct css_set * cset )
2014-02-13 06:58:40 -05:00
{
2016-06-22 17:28:41 -03:00
unsigned long flags ;
2014-02-13 06:58:40 -05:00
/*
* Ensure that the refcount doesn't hit zero while any readers
* can see it. Similar to atomic_dec_and_lock(), but for an
* rwlock
*/
if ( atomic_add_unless ( & cset - > refcount , - 1 , 1 ) )
return ;
2016-06-22 17:28:41 -03:00
spin_lock_irqsave ( & css_set_lock , flags ) ;
2014-09-19 16:51:00 +08:00
put_css_set_locked ( cset ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irqrestore ( & css_set_lock , flags ) ;
2014-02-13 06:58:40 -05:00
}
2007-10-18 23:39:36 -07:00
/*
* refcounted get/put for css_set objects
*/
2013-06-12 21:04:49 -07:00
static inline void get_css_set ( struct css_set * cset )
2007-10-18 23:39:36 -07:00
{
2013-06-12 21:04:49 -07:00
atomic_inc ( & cset - > refcount ) ;
2007-10-18 23:39:36 -07:00
}
2013-06-24 15:21:48 -07:00
/**
2009-09-23 15:56:22 -07:00
* compare_css_sets - helper function for find_existing_css_set().
2013-06-12 21:04:49 -07:00
* @cset: candidate css_set being tested
* @old_cset: existing css_set for a task
2009-09-23 15:56:22 -07:00
* @new_cgrp: cgroup that's being entered by the task
* @template: desired set of css pointers in css_set (pre-calculated)
*
2013-07-31 16:18:36 +08:00
* Returns true if "cset" matches "old_cset" except for the hierarchy
2009-09-23 15:56:22 -07:00
* which "new_cgrp" belongs to, for which it should match "new_cgrp".
*/
2013-06-12 21:04:49 -07:00
static bool compare_css_sets ( struct css_set * cset ,
struct css_set * old_cset ,
2009-09-23 15:56:22 -07:00
struct cgroup * new_cgrp ,
struct cgroup_subsys_state * template [ ] )
{
struct list_head * l1 , * l2 ;
2014-04-23 11:13:14 -04:00
/*
* On the default hierarchy, there can be csets which are
* associated with the same set of cgroups but different csses.
* Let's first ensure that csses match.
*/
if ( memcmp ( template , cset - > subsys , sizeof ( cset - > subsys ) ) )
2009-09-23 15:56:22 -07:00
return false ;
/*
* Compare cgroup pointers in order to distinguish between
2014-04-23 11:13:14 -04:00
* different cgroups in hierarchies. As different cgroups may
* share the same effective css, this comparison is always
* necessary.
2009-09-23 15:56:22 -07:00
*/
2013-06-12 21:04:50 -07:00
l1 = & cset - > cgrp_links ;
l2 = & old_cset - > cgrp_links ;
2009-09-23 15:56:22 -07:00
while ( 1 ) {
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link1 , * link2 ;
2013-06-12 21:04:49 -07:00
struct cgroup * cgrp1 , * cgrp2 ;
2009-09-23 15:56:22 -07:00
l1 = l1 - > next ;
l2 = l2 - > next ;
/* See if we reached the end - both lists are equal length. */
2013-06-12 21:04:50 -07:00
if ( l1 = = & cset - > cgrp_links ) {
BUG_ON ( l2 ! = & old_cset - > cgrp_links ) ;
2009-09-23 15:56:22 -07:00
break ;
} else {
2013-06-12 21:04:50 -07:00
BUG_ON ( l2 = = & old_cset - > cgrp_links ) ;
2009-09-23 15:56:22 -07:00
}
/* Locate the cgroups associated with these links. */
2013-06-12 21:04:50 -07:00
link1 = list_entry ( l1 , struct cgrp_cset_link , cgrp_link ) ;
link2 = list_entry ( l2 , struct cgrp_cset_link , cgrp_link ) ;
cgrp1 = link1 - > cgrp ;
cgrp2 = link2 - > cgrp ;
2009-09-23 15:56:22 -07:00
/* Hierarchies should be linked in the same order. */
2013-06-12 21:04:49 -07:00
BUG_ON ( cgrp1 - > root ! = cgrp2 - > root ) ;
2009-09-23 15:56:22 -07:00
/*
* If this hierarchy is the hierarchy of the cgroup
* that's changing, then we need to check that this
* css_set points to the new cgroup; if it's any other
* hierarchy, then this css_set should point to the
* same cgroup as the old css_set.
*/
2013-06-12 21:04:49 -07:00
if ( cgrp1 - > root = = new_cgrp - > root ) {
if ( cgrp1 ! = new_cgrp )
2009-09-23 15:56:22 -07:00
return false ;
} else {
2013-06-12 21:04:49 -07:00
if ( cgrp1 ! = cgrp2 )
2009-09-23 15:56:22 -07:00
return false ;
}
}
return true ;
}
2013-06-24 15:21:48 -07:00
/**
* find_existing_css_set - init css array and find the matching css_set
* @old_cset: the css_set that we're using before the cgroup transition
* @cgrp: the cgroup that we're moving into
* @template: out param for the new set of csses, should be clear on entry
2007-10-18 23:39:36 -07:00
*/
2013-06-12 21:04:49 -07:00
static struct css_set * find_existing_css_set ( struct css_set * old_cset ,
struct cgroup * cgrp ,
struct cgroup_subsys_state * template [ ] )
2007-10-18 23:39:36 -07:00
{
2014-03-19 10:23:54 -04:00
struct cgroup_root * root = cgrp - > root ;
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2013-06-12 21:04:49 -07:00
struct css_set * cset ;
2013-01-10 11:49:27 +08:00
unsigned long key ;
2013-06-24 15:21:48 -07:00
int i ;
2007-10-18 23:39:36 -07:00
2010-03-10 15:22:07 -08:00
/*
* Build the set of subsystem state objects that we want to see in the
* new css_set. while subsystems can change globally, the entries here
* won't change, so no need for locking.
*/
2013-06-25 11:53:37 -07:00
for_each_subsys ( ss , i ) {
2014-04-23 11:13:14 -04:00
if ( root - > subsys_mask & ( 1UL < < i ) ) {
2014-04-23 11:13:14 -04:00
/*
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
template [ i ] = cgroup_e_css ( cgrp , ss ) ;
2007-10-18 23:39:36 -07:00
} else {
2014-04-23 11:13:14 -04:00
/*
* @ss is not in this hierarchy, so we don't want
* to change the css.
*/
2013-06-12 21:04:49 -07:00
template [ i ] = old_cset - > subsys [ i ] ;
2007-10-18 23:39:36 -07:00
}
}
2013-01-10 11:49:27 +08:00
key = css_set_hash ( template ) ;
2013-06-12 21:04:49 -07:00
hash_for_each_possible ( css_set_table , cset , hlist , key ) {
if ( ! compare_css_sets ( cset , old_cset , cgrp , template ) )
2009-09-23 15:56:22 -07:00
continue ;
/* This css_set matches what we need */
2013-06-12 21:04:49 -07:00
return cset ;
2008-04-29 01:00:11 -07:00
}
2007-10-18 23:39:36 -07:00
/* No existing cgroup group matched */
return NULL ;
}
2013-06-12 21:04:50 -07:00
static void free_cgrp_cset_links ( struct list_head * links_to_free )
2007-10-18 23:39:36 -07:00
{
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link , * tmp_link ;
2008-07-25 01:46:55 -07:00
2013-06-12 21:04:50 -07:00
list_for_each_entry_safe ( link , tmp_link , links_to_free , cset_link ) {
list_del ( & link - > cset_link ) ;
2007-10-18 23:39:36 -07:00
kfree ( link ) ;
}
}
2013-06-12 21:04:50 -07:00
/**
* allocate_cgrp_cset_links - allocate cgrp_cset_links
* @count: the number of links to allocate
* @tmp_links: list_head the allocated links are put on
*
* Allocate @count cgrp_cset_link structures and chain them on @tmp_links
* through ->cset_link. Returns 0 on success or -errno.
2008-07-29 22:33:19 -07:00
*/
2013-06-12 21:04:50 -07:00
static int allocate_cgrp_cset_links ( int count , struct list_head * tmp_links )
2008-07-29 22:33:19 -07:00
{
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link ;
2008-07-29 22:33:19 -07:00
int i ;
2013-06-12 21:04:50 -07:00
INIT_LIST_HEAD ( tmp_links ) ;
2008-07-29 22:33:19 -07:00
for ( i = 0 ; i < count ; i + + ) {
2013-06-12 21:04:51 -07:00
link = kzalloc ( sizeof ( * link ) , GFP_KERNEL ) ;
2008-07-29 22:33:19 -07:00
if ( ! link ) {
2013-06-12 21:04:50 -07:00
free_cgrp_cset_links ( tmp_links ) ;
2008-07-29 22:33:19 -07:00
return - ENOMEM ;
}
2013-06-12 21:04:50 -07:00
list_add ( & link - > cset_link , tmp_links ) ;
2008-07-29 22:33:19 -07:00
}
return 0 ;
}
2009-01-07 18:07:42 -08:00
/**
* link_css_set - a helper function to link a css_set to a cgroup
2013-06-12 21:04:50 -07:00
* @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
2013-06-12 21:04:49 -07:00
* @cset: the css_set to be linked
2009-01-07 18:07:42 -08:00
* @cgrp: the destination cgroup
*/
2013-06-12 21:04:50 -07:00
static void link_css_set ( struct list_head * tmp_links , struct css_set * cset ,
struct cgroup * cgrp )
2009-01-07 18:07:42 -08:00
{
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link ;
2009-01-07 18:07:42 -08:00
2013-06-12 21:04:50 -07:00
BUG_ON ( list_empty ( tmp_links ) ) ;
2014-04-23 11:13:16 -04:00
if ( cgroup_on_dfl ( cgrp ) )
cset - > dfl_cgrp = cgrp ;
2013-06-12 21:04:50 -07:00
link = list_first_entry ( tmp_links , struct cgrp_cset_link , cset_link ) ;
link - > cset = cset ;
2009-09-23 15:56:22 -07:00
link - > cgrp = cgrp ;
2014-04-25 18:28:02 -04:00
2009-09-23 15:56:22 -07:00
/*
2015-10-15 16:41:51 -04:00
* Always add links to the tail of the lists so that the lists are
* in choronological order.
2009-09-23 15:56:22 -07:00
*/
2015-10-15 16:41:51 -04:00
list_move_tail ( & link - > cset_link , & cgrp - > cset_links ) ;
2013-06-12 21:04:50 -07:00
list_add_tail ( & link - > cgrp_link , & cset - > cgrp_links ) ;
2015-10-15 16:41:51 -04:00
if ( cgroup_parent ( cgrp ) )
cgroup_get ( cgrp ) ;
2009-01-07 18:07:42 -08:00
}
2013-06-24 15:21:48 -07:00
/**
* find_css_set - return a new css_set with one cgroup updated
* @old_cset: the baseline css_set
* @cgrp: the cgroup to be updated
*
* Return a new css_set that's equivalent to @old_cset, but with @cgrp
* substituted into the appropriate hierarchy.
2007-10-18 23:39:36 -07:00
*/
2013-06-12 21:04:49 -07:00
static struct css_set * find_css_set ( struct css_set * old_cset ,
struct cgroup * cgrp )
2007-10-18 23:39:36 -07:00
{
2013-06-24 15:21:48 -07:00
struct cgroup_subsys_state * template [ CGROUP_SUBSYS_COUNT ] = { } ;
2013-06-12 21:04:49 -07:00
struct css_set * cset ;
2013-06-12 21:04:50 -07:00
struct list_head tmp_links ;
struct cgrp_cset_link * link ;
2014-04-23 11:13:15 -04:00
struct cgroup_subsys * ss ;
2013-01-10 11:49:27 +08:00
unsigned long key ;
2014-04-23 11:13:15 -04:00
int ssid ;
2008-04-29 01:00:11 -07:00
2013-06-24 15:21:48 -07:00
lockdep_assert_held ( & cgroup_mutex ) ;
2007-10-18 23:39:36 -07:00
/* First see if we already have a cgroup group that matches
* the desired set */
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2013-06-12 21:04:49 -07:00
cset = find_existing_css_set ( old_cset , cgrp , template ) ;
if ( cset )
get_css_set ( cset ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:36 -07:00
2013-06-12 21:04:49 -07:00
if ( cset )
return cset ;
2007-10-18 23:39:36 -07:00
2013-06-12 21:04:51 -07:00
cset = kzalloc ( sizeof ( * cset ) , GFP_KERNEL ) ;
2013-06-12 21:04:49 -07:00
if ( ! cset )
2007-10-18 23:39:36 -07:00
return NULL ;
2013-06-12 21:04:50 -07:00
/* Allocate all the cgrp_cset_link objects that we'll need */
2013-06-24 15:21:47 -07:00
if ( allocate_cgrp_cset_links ( cgroup_root_count , & tmp_links ) < 0 ) {
2013-06-12 21:04:49 -07:00
kfree ( cset ) ;
2007-10-18 23:39:36 -07:00
return NULL ;
}
2013-06-12 21:04:49 -07:00
atomic_set ( & cset - > refcount , 1 ) ;
2013-06-12 21:04:50 -07:00
INIT_LIST_HEAD ( & cset - > cgrp_links ) ;
2013-06-12 21:04:49 -07:00
INIT_LIST_HEAD ( & cset - > tasks ) ;
2014-02-25 10:04:01 -05:00
INIT_LIST_HEAD ( & cset - > mg_tasks ) ;
2014-02-25 10:04:03 -05:00
INIT_LIST_HEAD ( & cset - > mg_preload_node ) ;
2014-02-25 10:04:01 -05:00
INIT_LIST_HEAD ( & cset - > mg_node ) ;
2015-10-15 16:41:52 -04:00
INIT_LIST_HEAD ( & cset - > task_iters ) ;
2013-06-12 21:04:49 -07:00
INIT_HLIST_NODE ( & cset - > hlist ) ;
2007-10-18 23:39:36 -07:00
/* Copy the set of subsystem state objects generated in
* find_existing_css_set() */
2013-06-12 21:04:49 -07:00
memcpy ( cset - > subsys , template , sizeof ( cset - > subsys ) ) ;
2007-10-18 23:39:36 -07:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2007-10-18 23:39:36 -07:00
/* Add reference counts and links from the new css_set. */
2013-06-12 21:04:50 -07:00
list_for_each_entry ( link , & old_cset - > cgrp_links , cgrp_link ) {
2009-09-23 15:56:22 -07:00
struct cgroup * c = link - > cgrp ;
2013-06-12 21:04:50 -07:00
2009-09-23 15:56:22 -07:00
if ( c - > root = = cgrp - > root )
c = cgrp ;
2013-06-12 21:04:50 -07:00
link_css_set ( & tmp_links , cset , c ) ;
2009-09-23 15:56:22 -07:00
}
2007-10-18 23:39:36 -07:00
2013-06-12 21:04:50 -07:00
BUG_ON ( ! list_empty ( & tmp_links ) ) ;
2007-10-18 23:39:36 -07:00
css_set_count + + ;
2008-04-29 01:00:11 -07:00
2014-04-23 11:13:15 -04:00
/* Add @cset to the hash table */
2013-06-12 21:04:49 -07:00
key = css_set_hash ( cset - > subsys ) ;
hash_add ( css_set_table , & cset - > hlist , key ) ;
2008-04-29 01:00:11 -07:00
2015-11-23 14:55:41 -05:00
for_each_subsys ( ss , ssid ) {
struct cgroup_subsys_state * css = cset - > subsys [ ssid ] ;
2014-04-23 11:13:15 -04:00
list_add_tail ( & cset - > e_cset_node [ ssid ] ,
2015-11-23 14:55:41 -05:00
& css - > cgroup - > e_csets [ ssid ] ) ;
css_get ( css ) ;
}
2014-04-23 11:13:15 -04:00
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:36 -07:00
2013-06-12 21:04:49 -07:00
return cset ;
2007-10-18 23:39:33 -07:00
}
2014-03-19 10:23:54 -04:00
static struct cgroup_root * cgroup_root_from_kf ( struct kernfs_root * kf_root )
2014-02-11 11:52:49 -05:00
{
2014-03-19 10:23:54 -04:00
struct cgroup * root_cgrp = kf_root - > kn - > priv ;
2014-02-11 11:52:49 -05:00
2014-03-19 10:23:54 -04:00
return root_cgrp - > root ;
2014-02-11 11:52:49 -05:00
}
2014-03-19 10:23:54 -04:00
static int cgroup_init_root_id ( struct cgroup_root * root )
2014-02-11 11:52:49 -05:00
{
int id ;
lockdep_assert_held ( & cgroup_mutex ) ;
2014-03-19 10:23:53 -04:00
id = idr_alloc_cyclic ( & cgroup_hierarchy_idr , root , 0 , 0 , GFP_KERNEL ) ;
2014-02-11 11:52:49 -05:00
if ( id < 0 )
return id ;
root - > hierarchy_id = id ;
return 0 ;
}
2014-03-19 10:23:54 -04:00
static void cgroup_exit_root_id ( struct cgroup_root * root )
2014-02-11 11:52:49 -05:00
{
lockdep_assert_held ( & cgroup_mutex ) ;
2016-06-17 12:23:59 -04:00
idr_remove ( & cgroup_hierarchy_idr , root - > hierarchy_id ) ;
2014-02-11 11:52:49 -05:00
}
2014-03-19 10:23:54 -04:00
static void cgroup_free_root ( struct cgroup_root * root )
2014-02-11 11:52:49 -05:00
{
if ( root ) {
idr_destroy ( & root - > cgroup_idr ) ;
kfree ( root ) ;
}
}
2014-03-19 10:23:54 -04:00
static void cgroup_destroy_root ( struct cgroup_root * root )
2014-02-11 11:52:49 -05:00
{
2014-03-19 10:23:54 -04:00
struct cgroup * cgrp = & root - > cgrp ;
2014-02-11 11:52:49 -05:00
struct cgrp_cset_link * link , * tmp_link ;
2016-08-10 11:23:44 -04:00
trace_cgroup_destroy_root ( root ) ;
2016-03-03 09:58:01 -05:00
cgroup_lock_and_drain_offline ( & cgrp_dfl_root . cgrp ) ;
2014-02-11 11:52:49 -05:00
2014-02-12 09:29:50 -05:00
BUG_ON ( atomic_read ( & root - > nr_cgrps ) ) ;
2014-05-16 13:22:48 -04:00
BUG_ON ( ! list_empty ( & cgrp - > self . children ) ) ;
2014-02-11 11:52:49 -05:00
/* Rebind all subsystems back to the default hierarchy */
2016-03-03 09:58:01 -05:00
WARN_ON ( rebind_subsystems ( & cgrp_dfl_root , root - > subsys_mask ) ) ;
2014-02-11 11:52:49 -05:00
/*
* Release all the links from cset_links to this hierarchy's
* root cgroup
*/
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-11 11:52:49 -05:00
list_for_each_entry_safe ( link , tmp_link , & cgrp - > cset_links , cset_link ) {
list_del ( & link - > cset_link ) ;
list_del ( & link - > cgrp_link ) ;
kfree ( link ) ;
}
2015-10-15 16:41:53 -04:00
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-02-11 11:52:49 -05:00
if ( ! list_empty ( & root - > root_list ) ) {
list_del ( & root - > root_list ) ;
cgroup_root_count - - ;
}
cgroup_exit_root_id ( root ) ;
mutex_unlock ( & cgroup_mutex ) ;
2014-02-11 11:52:49 -05:00
kernfs_destroy_root ( root - > kf_root ) ;
2014-02-11 11:52:49 -05:00
cgroup_free_root ( root ) ;
}
2016-05-09 09:59:55 -05:00
/*
* look up cgroup associated with current task's cgroup namespace on the
* specified hierarchy
*/
static struct cgroup *
current_cgns_cgroup_from_root ( struct cgroup_root * root )
{
struct cgroup * res = NULL ;
struct css_set * cset ;
lockdep_assert_held ( & css_set_lock ) ;
rcu_read_lock ( ) ;
cset = current - > nsproxy - > cgroup_ns - > root_cset ;
if ( cset = = & init_css_set ) {
res = & root - > cgrp ;
} else {
struct cgrp_cset_link * link ;
list_for_each_entry ( link , & cset - > cgrp_links , cgrp_link ) {
struct cgroup * c = link - > cgrp ;
if ( c - > root = = root ) {
res = c ;
break ;
}
}
}
rcu_read_unlock ( ) ;
BUG_ON ( ! res ) ;
return res ;
}
2014-02-25 10:04:02 -05:00
/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup * cset_cgroup_from_root ( struct css_set * cset ,
2014-03-19 10:23:54 -04:00
struct cgroup_root * root )
2009-09-23 15:56:22 -07:00
{
struct cgroup * res = NULL ;
2014-02-13 06:58:40 -05:00
lockdep_assert_held ( & cgroup_mutex ) ;
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2014-02-13 06:58:40 -05:00
2013-06-12 21:04:49 -07:00
if ( cset = = & init_css_set ) {
2014-03-19 10:23:54 -04:00
res = & root - > cgrp ;
2009-09-23 15:56:22 -07:00
} else {
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link ;
list_for_each_entry ( link , & cset - > cgrp_links , cgrp_link ) {
2009-09-23 15:56:22 -07:00
struct cgroup * c = link - > cgrp ;
2013-06-12 21:04:50 -07:00
2009-09-23 15:56:22 -07:00
if ( c - > root = = root ) {
res = c ;
break ;
}
}
}
2014-02-13 06:58:40 -05:00
2009-09-23 15:56:22 -07:00
BUG_ON ( ! res ) ;
return res ;
}
2007-10-18 23:39:30 -07:00
/*
2014-02-25 10:04:02 -05:00
* Return the cgroup for "task" from the given hierarchy. Must be
2015-10-15 16:41:53 -04:00
* called with cgroup_mutex and css_set_lock held.
2014-02-25 10:04:02 -05:00
*/
static struct cgroup * task_cgroup_from_root ( struct task_struct * task ,
2014-03-19 10:23:54 -04:00
struct cgroup_root * root )
2014-02-25 10:04:02 -05:00
{
/*
* No need to lock the task - since we hold cgroup_mutex the
* task can't change groups, so the only thing that can happen
* is that it exits and its css is set back to init_css_set.
*/
return cset_cgroup_from_root ( task_css_set ( task ) , root ) ;
}
2007-10-18 23:39:30 -07:00
/*
* A task must hold cgroup_mutex to modify cgroups.
*
* Any task can increment and decrement the count field without lock.
* So in general, code holding cgroup_mutex can't rely on the count
* field not changing. However, if the count goes to zero, then only
2008-02-07 00:14:43 -08:00
* cgroup_attach_task() can increment it again. Because a count of zero
2007-10-18 23:39:30 -07:00
* means that no tasks are currently attached, therefore there is no
* way a task attached to that cgroup can fork (the other way to
* increment the count). So code holding cgroup_mutex can safely
* assume that if the count is zero, it will stay zero. Similarly, if
* a task holds cgroup_mutex on a cgroup with zero count, it
* knows that the cgroup won't be removed, as cgroup_rmdir()
* needs that mutex.
*
* A cgroup can only be deleted if both its 'count' of using tasks
* is zero, and its list of 'children' cgroups is empty. Since all
* tasks in the system use _some_ cgroup, and since there is always at
2014-03-19 10:23:54 -04:00
* least one task in the system (init, pid == 1), therefore, root cgroup
2007-10-18 23:39:30 -07:00
* always has either children cgroups and/or using tasks. So we don't
2014-03-19 10:23:54 -04:00
* need a special hack to ensure that root cgroup cannot be deleted.
2007-10-18 23:39:30 -07:00
*
* P.S. One more locking exception. RCU is used to guard the
2008-02-07 00:14:43 -08:00
* update of a tasks cgroup pointer by cgroup_attach_task()
2007-10-18 23:39:30 -07:00
*/
2014-02-11 11:52:49 -05:00
static struct kernfs_syscall_ops cgroup_kf_syscall_ops ;
2009-10-01 15:43:56 -07:00
static const struct file_operations proc_cgroupstats_operations ;
2007-10-18 23:39:35 -07:00
2014-02-11 11:52:48 -05:00
static char * cgroup_file_name ( struct cgroup * cgrp , const struct cftype * cft ,
char * buf )
2007-10-18 23:39:30 -07:00
{
2015-08-18 13:58:16 -07:00
struct cgroup_subsys * ss = cft - > ss ;
2014-02-11 11:52:48 -05:00
if ( cft - > ss & & ! ( cft - > flags & CFTYPE_NO_PREFIX ) & &
! ( cgrp - > root - > flags & CGRP_ROOT_NOPREFIX ) )
snprintf ( buf , CGROUP_FILE_NAME_MAX , " %s.%s " ,
2015-08-18 13:58:16 -07:00
cgroup_on_dfl ( cgrp ) ? ss - > name : ss - > legacy_name ,
cft - > name ) ;
2014-02-11 11:52:48 -05:00
else
strncpy ( buf , cft - > name , CGROUP_FILE_NAME_MAX ) ;
return buf ;
2007-10-18 23:39:30 -07:00
}
2014-02-11 11:52:49 -05:00
/**
* cgroup_file_mode - deduce file mode of a control file
* @cft: the control file in question
*
2015-09-18 17:54:23 -04:00
* S_IRUGO for read, S_IWUSR for write.
2014-02-11 11:52:49 -05:00
*/
static umode_t cgroup_file_mode ( const struct cftype * cft )
2013-03-01 15:01:56 +08:00
{
2014-02-11 11:52:49 -05:00
umode_t mode = 0 ;
2013-03-01 15:01:56 +08:00
2014-02-11 11:52:49 -05:00
if ( cft - > read_u64 | | cft - > read_s64 | | cft - > seq_show )
mode | = S_IRUGO ;
2015-09-18 17:54:23 -04:00
if ( cft - > write_u64 | | cft - > write_s64 | | cft - > write ) {
if ( cft - > flags & CFTYPE_WORLD_WRITABLE )
mode | = S_IWUGO ;
else
mode | = S_IWUSR ;
}
2014-02-11 11:52:49 -05:00
return mode ;
2013-03-01 15:01:56 +08:00
}
2014-07-08 18:02:57 -04:00
/**
2016-02-22 22:25:46 -05:00
* cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
2014-11-18 02:49:50 -05:00
* @subtree_control: the new subtree_control mask to consider
2016-03-03 09:58:01 -05:00
* @this_ss_mask: available subsystems
2014-07-08 18:02:57 -04:00
*
* On the default hierarchy, a subsystem may request other subsystems to be
* enabled together through its ->depends_on mask. In such cases, more
* subsystems than specified in "cgroup.subtree_control" may be enabled.
*
2014-11-18 02:49:50 -05:00
* This function calculates which subsystems need to be enabled if
2016-03-03 09:58:01 -05:00
* @subtree_control is to be applied while restricted to @this_ss_mask.
2014-07-08 18:02:57 -04:00
*/
2016-03-03 09:58:01 -05:00
static u16 cgroup_calc_subtree_ss_mask ( u16 subtree_control , u16 this_ss_mask )
2014-07-08 18:02:56 -04:00
{
2016-02-22 22:25:47 -05:00
u16 cur_ss_mask = subtree_control ;
2014-07-08 18:02:57 -04:00
struct cgroup_subsys * ss ;
int ssid ;
lockdep_assert_held ( & cgroup_mutex ) ;
2016-03-08 11:51:26 -05:00
cur_ss_mask | = cgrp_dfl_implicit_ss_mask ;
2014-07-08 18:02:57 -04:00
while ( true ) {
2016-02-22 22:25:47 -05:00
u16 new_ss_mask = cur_ss_mask ;
2014-07-08 18:02:57 -04:00
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , ssid , cur_ss_mask ) {
2015-06-06 10:02:15 +10:00
new_ss_mask | = ss - > depends_on ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2014-07-08 18:02:57 -04:00
/*
* Mask out subsystems which aren't available. This can
* happen only if some depended-upon subsystems were bound
* to non-default hierarchies.
*/
2016-03-03 09:58:01 -05:00
new_ss_mask & = this_ss_mask ;
2014-07-08 18:02:57 -04:00
if ( new_ss_mask = = cur_ss_mask )
break ;
cur_ss_mask = new_ss_mask ;
}
2014-11-18 02:49:50 -05:00
return cur_ss_mask ;
}
2014-05-13 12:19:22 -04:00
/**
* cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
* @kn: the kernfs_node being serviced
*
* This helper undoes cgroup_kn_lock_live() and should be invoked before
* the method finishes if locking succeeded. Note that once this function
* returns the cgroup returned by cgroup_kn_lock_live() may become
* inaccessible any time. If the caller intends to continue to access the
* cgroup, it should pin it before invoking this function.
*/
static void cgroup_kn_unlock ( struct kernfs_node * kn )
{
struct cgroup * cgrp ;
if ( kernfs_type ( kn ) = = KERNFS_DIR )
cgrp = kn - > priv ;
else
cgrp = kn - > parent - > priv ;
mutex_unlock ( & cgroup_mutex ) ;
kernfs_unbreak_active_protection ( kn ) ;
cgroup_put ( cgrp ) ;
}
/**
* cgroup_kn_lock_live - locking helper for cgroup kernfs methods
* @kn: the kernfs_node being serviced
2016-03-03 09:58:00 -05:00
* @drain_offline: perform offline draining on the cgroup
2014-05-13 12:19:22 -04:00
*
* This helper is to be used by a cgroup kernfs method currently servicing
* @kn. It breaks the active protection, performs cgroup locking and
* verifies that the associated cgroup is alive. Returns the cgroup if
* alive; otherwise, %NULL. A successful return should be undone by a
2016-03-03 09:58:00 -05:00
* matching cgroup_kn_unlock() invocation. If @drain_offline is %true, the
* cgroup is drained of offlining csses before return.
2014-05-13 12:19:22 -04:00
*
* Any cgroup kernfs method implementation which requires locking the
* associated cgroup should use this helper. It avoids nesting cgroup
* locking under kernfs active protection and allows all kernfs operations
* including self-removal.
*/
2016-03-03 09:58:00 -05:00
static struct cgroup * cgroup_kn_lock_live ( struct kernfs_node * kn ,
bool drain_offline )
2014-05-13 12:19:22 -04:00
{
struct cgroup * cgrp ;
if ( kernfs_type ( kn ) = = KERNFS_DIR )
cgrp = kn - > priv ;
else
cgrp = kn - > parent - > priv ;
2007-10-18 23:39:30 -07:00
2014-02-11 11:52:49 -05:00
/*
2014-05-13 12:19:23 -04:00
* We're gonna grab cgroup_mutex which nests outside kernfs
2014-05-13 12:19:22 -04:00
* active_ref. cgroup liveliness check alone provides enough
* protection against removal. Ensure @cgrp stays accessible and
* break the active_ref protection.
2014-02-11 11:52:49 -05:00
*/
2014-09-04 14:43:38 +08:00
if ( ! cgroup_tryget ( cgrp ) )
return NULL ;
2014-05-13 12:19:22 -04:00
kernfs_break_active_protection ( kn ) ;
2007-10-18 23:39:30 -07:00
2016-03-03 09:58:00 -05:00
if ( drain_offline )
cgroup_lock_and_drain_offline ( cgrp ) ;
else
mutex_lock ( & cgroup_mutex ) ;
2014-05-13 12:19:22 -04:00
if ( ! cgroup_is_dead ( cgrp ) )
return cgrp ;
cgroup_kn_unlock ( kn ) ;
return NULL ;
2007-10-18 23:39:30 -07:00
}
2013-01-21 18:18:33 +08:00
static void cgroup_rm_file ( struct cgroup * cgrp , const struct cftype * cft )
2007-10-18 23:39:30 -07:00
{
2014-02-11 11:52:49 -05:00
char name [ CGROUP_FILE_NAME_MAX ] ;
2007-10-18 23:39:30 -07:00
2014-05-13 12:19:23 -04:00
lockdep_assert_held ( & cgroup_mutex ) ;
2015-11-05 00:12:24 -05:00
if ( cft - > file_offset ) {
struct cgroup_subsys_state * css = cgroup_css ( cgrp , cft - > ss ) ;
struct cgroup_file * cfile = ( void * ) css + cft - > file_offset ;
spin_lock_irq ( & cgroup_file_kn_lock ) ;
cfile - > kn = NULL ;
spin_unlock_irq ( & cgroup_file_kn_lock ) ;
}
2014-02-11 11:52:49 -05:00
kernfs_remove_by_name ( cgrp - > kn , cgroup_file_name ( cgrp , cft , name ) ) ;
2012-04-01 12:09:56 -07:00
}
2012-08-23 16:53:29 -04:00
/**
2015-09-18 17:54:23 -04:00
* css_clear_dir - remove subsys files in a cgroup directory
* @css: taget css
2012-08-23 16:53:29 -04:00
*/
2016-03-03 09:58:01 -05:00
static void css_clear_dir ( struct cgroup_subsys_state * css )
2012-04-01 12:09:56 -07:00
{
2016-03-03 09:58:01 -05:00
struct cgroup * cgrp = css - > cgroup ;
2015-09-18 17:54:23 -04:00
struct cftype * cfts ;
2012-04-01 12:09:56 -07:00
2016-03-03 09:57:58 -05:00
if ( ! ( css - > flags & CSS_VISIBLE ) )
return ;
css - > flags & = ~ CSS_VISIBLE ;
2015-09-18 17:54:23 -04:00
list_for_each_entry ( cfts , & css - > ss - > cfts , node )
cgroup_addrm_files ( css , cgrp , cfts , false ) ;
2007-10-18 23:39:30 -07:00
}
2015-09-18 17:54:23 -04:00
/**
2015-09-18 17:54:23 -04:00
* css_populate_dir - create subsys files in a cgroup directory
* @css: target css
2015-09-18 17:54:23 -04:00
*
* On failure, no file is added.
*/
2016-03-03 09:58:01 -05:00
static int css_populate_dir ( struct cgroup_subsys_state * css )
2015-09-18 17:54:23 -04:00
{
2016-03-03 09:58:01 -05:00
struct cgroup * cgrp = css - > cgroup ;
2015-09-18 17:54:23 -04:00
struct cftype * cfts , * failed_cfts ;
int ret ;
2015-09-18 17:54:23 -04:00
2016-03-03 09:58:00 -05:00
if ( ( css - > flags & CSS_VISIBLE ) | | ! cgrp - > kn )
2016-03-03 09:57:58 -05:00
return 0 ;
2015-09-18 17:54:23 -04:00
if ( ! css - > ss ) {
if ( cgroup_on_dfl ( cgrp ) )
cfts = cgroup_dfl_base_files ;
else
cfts = cgroup_legacy_base_files ;
2015-09-18 17:54:23 -04:00
2015-09-18 17:54:23 -04:00
return cgroup_addrm_files ( & cgrp - > self , cgrp , cfts , true ) ;
}
2015-09-18 17:54:23 -04:00
2015-09-18 17:54:23 -04:00
list_for_each_entry ( cfts , & css - > ss - > cfts , node ) {
ret = cgroup_addrm_files ( css , cgrp , cfts , true ) ;
if ( ret < 0 ) {
failed_cfts = cfts ;
goto err ;
2015-09-18 17:54:23 -04:00
}
}
2016-03-03 09:57:58 -05:00
css - > flags | = CSS_VISIBLE ;
2015-09-18 17:54:23 -04:00
return 0 ;
err :
2015-09-18 17:54:23 -04:00
list_for_each_entry ( cfts , & css - > ss - > cfts , node ) {
if ( cfts = = failed_cfts )
break ;
cgroup_addrm_files ( css , cgrp , cfts , false ) ;
}
2015-09-18 17:54:23 -04:00
return ret ;
}
2016-02-22 22:25:47 -05:00
static int rebind_subsystems ( struct cgroup_root * dst_root , u16 ss_mask )
2007-10-18 23:39:30 -07:00
{
2015-09-18 17:54:23 -04:00
struct cgroup * dcgrp = & dst_root - > cgrp ;
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2014-04-23 11:13:15 -04:00
int ssid , i , ret ;
2007-10-18 23:39:30 -07:00
2014-02-11 11:52:47 -05:00
lockdep_assert_held ( & cgroup_mutex ) ;
2010-03-10 15:22:07 -08:00
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , ssid , ss_mask ) {
2016-03-08 11:51:26 -05:00
/*
* If @ss has non-root csses attached to it, can't move.
* If @ss is an implicit controller, it is exempt from this
* rule and can be stolen.
*/
if ( css_next_child ( NULL , cgroup_css ( & ss - > root - > cgrp , ss ) ) & &
! ss - > implicit_on_dfl )
2014-02-08 10:36:58 -05:00
return - EBUSY ;
2007-10-18 23:39:30 -07:00
2014-03-19 10:23:54 -04:00
/* can't move between two non-dummy roots either */
2014-04-23 11:13:16 -04:00
if ( ss - > root ! = & cgrp_dfl_root & & dst_root ! = & cgrp_dfl_root )
2014-03-19 10:23:54 -04:00
return - EBUSY ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2007-10-18 23:39:30 -07:00
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , ssid , ss_mask ) {
2015-09-18 17:54:23 -04:00
struct cgroup_root * src_root = ss - > root ;
struct cgroup * scgrp = & src_root - > cgrp ;
struct cgroup_subsys_state * css = cgroup_css ( scgrp , ss ) ;
2014-04-23 11:13:15 -04:00
struct css_set * cset ;
2013-06-25 11:53:37 -07:00
2015-09-18 17:54:23 -04:00
WARN_ON ( ! css | | cgroup_css ( dcgrp , ss ) ) ;
2013-06-24 15:21:47 -07:00
2016-03-03 09:58:01 -05:00
/* disable from the source */
src_root - > subsys_mask & = ~ ( 1 < < ssid ) ;
WARN_ON ( cgroup_apply_control ( scgrp ) ) ;
cgroup_finalize_control ( scgrp , 0 ) ;
2015-09-18 17:54:23 -04:00
2016-03-03 09:58:01 -05:00
/* rebind */
2015-09-18 17:54:23 -04:00
RCU_INIT_POINTER ( scgrp - > subsys [ ssid ] , NULL ) ;
rcu_assign_pointer ( dcgrp - > subsys [ ssid ] , css ) ;
2014-03-19 10:23:54 -04:00
ss - > root = dst_root ;
2015-09-18 17:54:23 -04:00
css - > cgroup = dcgrp ;
2013-06-24 15:21:47 -07:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-04-23 11:13:15 -04:00
hash_for_each ( css_set_table , i , cset , hlist )
list_move_tail ( & cset - > e_cset_node [ ss - > id ] ,
2015-09-18 17:54:23 -04:00
& dcgrp - > e_csets [ ss - > id ] ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-04-23 11:13:15 -04:00
2014-04-23 11:13:16 -04:00
/* default hierarchy doesn't enable controllers by default */
2014-04-23 11:13:14 -04:00
dst_root - > subsys_mask | = 1 < < ssid ;
2015-09-18 11:56:28 -04:00
if ( dst_root = = & cgrp_dfl_root ) {
static_branch_enable ( cgroup_subsys_on_dfl_key [ ssid ] ) ;
} else {
2015-09-18 17:54:23 -04:00
dcgrp - > subtree_control | = 1 < < ssid ;
2015-09-18 11:56:28 -04:00
static_branch_disable ( cgroup_subsys_on_dfl_key [ ssid ] ) ;
2014-07-08 18:02:56 -04:00
}
2013-08-13 11:01:55 -04:00
2016-03-03 09:58:01 -05:00
ret = cgroup_apply_control ( dcgrp ) ;
if ( ret )
pr_warn ( " partial failure to rebind %s controller (err=%d) \n " ,
ss - > name , ret ) ;
2014-03-19 10:23:54 -04:00
if ( ss - > bind )
ss - > bind ( css ) ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2007-10-18 23:39:30 -07:00
2015-09-18 17:54:23 -04:00
kernfs_activate ( dcgrp - > kn ) ;
2007-10-18 23:39:30 -07:00
return 0 ;
}
2016-05-09 09:59:55 -05:00
static int cgroup_show_path ( struct seq_file * sf , struct kernfs_node * kf_node ,
struct kernfs_root * kf_root )
{
2016-05-12 12:34:38 +03:00
int len = 0 ;
2016-05-09 09:59:55 -05:00
char * buf = NULL ;
struct cgroup_root * kf_cgroot = cgroup_root_from_kf ( kf_root ) ;
struct cgroup * ns_cgroup ;
buf = kmalloc ( PATH_MAX , GFP_KERNEL ) ;
if ( ! buf )
return - ENOMEM ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-05-09 09:59:55 -05:00
ns_cgroup = current_cgns_cgroup_from_root ( kf_cgroot ) ;
len = kernfs_path_from_node ( kf_node , ns_cgroup - > kn , buf , PATH_MAX ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2016-05-09 09:59:55 -05:00
if ( len > = PATH_MAX )
len = - ERANGE ;
else if ( len > 0 ) {
seq_escape ( sf , buf , " \t \n \\ " ) ;
len = 0 ;
}
kfree ( buf ) ;
return len ;
}
2014-02-11 11:52:49 -05:00
static int cgroup_show_options ( struct seq_file * seq ,
struct kernfs_root * kf_root )
2007-10-18 23:39:30 -07:00
{
2014-03-19 10:23:54 -04:00
struct cgroup_root * root = cgroup_root_from_kf ( kf_root ) ;
2007-10-18 23:39:30 -07:00
struct cgroup_subsys * ss ;
2013-12-06 15:11:57 -05:00
int ssid ;
2007-10-18 23:39:30 -07:00
2015-08-18 13:58:16 -07:00
if ( root ! = & cgrp_dfl_root )
for_each_subsys ( ss , ssid )
if ( root - > subsys_mask & ( 1 < < ssid ) )
2015-09-08 14:58:22 -07:00
seq_show_option ( seq , ss - > legacy_name , NULL ) ;
2013-04-14 20:15:25 -07:00
if ( root - > flags & CGRP_ROOT_NOPREFIX )
2007-10-18 23:39:30 -07:00
seq_puts ( seq , " ,noprefix " ) ;
2013-04-14 20:15:25 -07:00
if ( root - > flags & CGRP_ROOT_XATTR )
2012-08-23 16:53:30 -04:00
seq_puts ( seq , " ,xattr " ) ;
2014-02-08 10:36:58 -05:00
spin_lock ( & release_agent_path_lock ) ;
2007-10-18 23:39:38 -07:00
if ( strlen ( root - > release_agent_path ) )
2015-09-04 15:44:57 -07:00
seq_show_option ( seq , " release_agent " ,
root - > release_agent_path ) ;
2014-02-08 10:36:58 -05:00
spin_unlock ( & release_agent_path_lock ) ;
2014-03-19 10:23:54 -04:00
if ( test_bit ( CGRP_CPUSET_CLONE_CHILDREN , & root - > cgrp . flags ) )
2010-10-27 15:33:35 -07:00
seq_puts ( seq , " ,clone_children " ) ;
2009-09-23 15:56:19 -07:00
if ( strlen ( root - > name ) )
2015-09-04 15:44:57 -07:00
seq_show_option ( seq , " name " , root - > name ) ;
2007-10-18 23:39:30 -07:00
return 0 ;
}
struct cgroup_sb_opts {
2016-02-22 22:25:47 -05:00
u16 subsys_mask ;
2014-05-04 15:09:13 -04:00
unsigned int flags ;
2007-10-18 23:39:38 -07:00
char * release_agent ;
2012-11-19 08:13:38 -08:00
bool cpuset_clone_children ;
2009-09-23 15:56:19 -07:00
char * name ;
2009-09-23 15:56:23 -07:00
/* User explicitly requested empty subsystem */
bool none ;
2007-10-18 23:39:30 -07:00
} ;
2010-03-10 15:22:09 -08:00
static int parse_cgroupfs_options ( char * data , struct cgroup_sb_opts * opts )
2007-10-18 23:39:30 -07:00
{
2010-10-27 15:33:37 -07:00
char * token , * o = data ;
bool all_ss = false , one_ss = false ;
2016-02-22 22:25:47 -05:00
u16 mask = U16_MAX ;
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2014-07-09 10:08:08 -04:00
int nr_opts = 0 ;
2013-06-25 11:53:37 -07:00
int i ;
2009-06-17 16:26:33 -07:00
# ifdef CONFIG_CPUSETS
2016-02-22 22:25:47 -05:00
mask = ~ ( ( u16 ) 1 < < cpuset_cgrp_id ) ;
2009-06-17 16:26:33 -07:00
# endif
2007-10-18 23:39:30 -07:00
2009-09-23 15:56:19 -07:00
memset ( opts , 0 , sizeof ( * opts ) ) ;
2007-10-18 23:39:30 -07:00
while ( ( token = strsep ( & o , " , " ) ) ! = NULL ) {
2014-07-09 10:08:08 -04:00
nr_opts + + ;
2007-10-18 23:39:30 -07:00
if ( ! * token )
return - EINVAL ;
2010-10-27 15:33:37 -07:00
if ( ! strcmp ( token , " none " ) ) {
2009-09-23 15:56:23 -07:00
/* Explicitly have no subsystems */
opts - > none = true ;
2010-10-27 15:33:37 -07:00
continue ;
}
if ( ! strcmp ( token , " all " ) ) {
/* Mutually exclusive option 'all' + subsystem name */
if ( one_ss )
return - EINVAL ;
all_ss = true ;
continue ;
}
if ( ! strcmp ( token , " noprefix " ) ) {
2013-04-14 20:15:25 -07:00
opts - > flags | = CGRP_ROOT_NOPREFIX ;
2010-10-27 15:33:37 -07:00
continue ;
}
if ( ! strcmp ( token , " clone_children " ) ) {
2012-11-19 08:13:38 -08:00
opts - > cpuset_clone_children = true ;
2010-10-27 15:33:37 -07:00
continue ;
}
2012-08-23 16:53:30 -04:00
if ( ! strcmp ( token , " xattr " ) ) {
2013-04-14 20:15:25 -07:00
opts - > flags | = CGRP_ROOT_XATTR ;
2012-08-23 16:53:30 -04:00
continue ;
}
2010-10-27 15:33:37 -07:00
if ( ! strncmp ( token , " release_agent= " , 14 ) ) {
2007-10-18 23:39:38 -07:00
/* Specifying two release agents is forbidden */
if ( opts - > release_agent )
return - EINVAL ;
2009-09-23 15:56:19 -07:00
opts - > release_agent =
2010-08-10 18:02:54 -07:00
kstrndup ( token + 14 , PATH_MAX - 1 , GFP_KERNEL ) ;
2007-10-18 23:39:38 -07:00
if ( ! opts - > release_agent )
return - ENOMEM ;
2010-10-27 15:33:37 -07:00
continue ;
}
if ( ! strncmp ( token , " name= " , 5 ) ) {
2009-09-23 15:56:19 -07:00
const char * name = token + 5 ;
/* Can't specify an empty name */
if ( ! strlen ( name ) )
return - EINVAL ;
/* Must match [\w.-]+ */
for ( i = 0 ; i < strlen ( name ) ; i + + ) {
char c = name [ i ] ;
if ( isalnum ( c ) )
continue ;
if ( ( c = = ' . ' ) | | ( c = = ' - ' ) | | ( c = = ' _ ' ) )
continue ;
return - EINVAL ;
}
/* Specifying two names is forbidden */
if ( opts - > name )
return - EINVAL ;
opts - > name = kstrndup ( name ,
2010-08-10 18:02:54 -07:00
MAX_CGROUP_ROOT_NAMELEN - 1 ,
2009-09-23 15:56:19 -07:00
GFP_KERNEL ) ;
if ( ! opts - > name )
return - ENOMEM ;
2010-10-27 15:33:37 -07:00
continue ;
}
2013-06-25 11:53:37 -07:00
for_each_subsys ( ss , i ) {
2015-08-18 13:58:16 -07:00
if ( strcmp ( token , ss - > legacy_name ) )
2010-10-27 15:33:37 -07:00
continue ;
2015-09-18 11:56:28 -04:00
if ( ! cgroup_ssid_enabled ( i ) )
2010-10-27 15:33:37 -07:00
continue ;
2016-02-11 13:34:49 -05:00
if ( cgroup_ssid_no_v1 ( i ) )
continue ;
2010-10-27 15:33:37 -07:00
/* Mutually exclusive option 'all' + subsystem name */
if ( all_ss )
return - EINVAL ;
2014-05-04 15:09:13 -04:00
opts - > subsys_mask | = ( 1 < < i ) ;
2010-10-27 15:33:37 -07:00
one_ss = true ;
break ;
}
if ( i = = CGROUP_SUBSYS_COUNT )
return - ENOENT ;
}
2014-07-09 10:08:08 -04:00
/*
* If the 'all' option was specified select all the subsystems,
* otherwise if 'none', 'name=' and a subsystem name options were
* not specified, let's default to 'all'
*/
if ( all_ss | | ( ! one_ss & & ! opts - > none & & ! opts - > name ) )
for_each_subsys ( ss , i )
2016-02-11 13:34:49 -05:00
if ( cgroup_ssid_enabled ( i ) & & ! cgroup_ssid_no_v1 ( i ) )
2014-07-09 10:08:08 -04:00
opts - > subsys_mask | = ( 1 < < i ) ;
/*
* We either have to specify by name or by subsystems. (So all
* empty hierarchies must have a name).
*/
if ( ! opts - > subsys_mask & & ! opts - > name )
return - EINVAL ;
2009-06-17 16:26:33 -07:00
/*
* Option noprefix was introduced just for backward compatibility
* with the old cpuset, so we allow noprefix only if mounting just
* the cpuset subsystem.
*/
2013-04-14 20:15:25 -07:00
if ( ( opts - > flags & CGRP_ROOT_NOPREFIX ) & & ( opts - > subsys_mask & mask ) )
2009-06-17 16:26:33 -07:00
return - EINVAL ;
2009-09-23 15:56:23 -07:00
/* Can't specify "none" and some subsystems */
2012-08-23 16:53:31 -04:00
if ( opts - > subsys_mask & & opts - > none )
2009-09-23 15:56:23 -07:00
return - EINVAL ;
2007-10-18 23:39:30 -07:00
return 0 ;
}
2014-02-11 11:52:49 -05:00
static int cgroup_remount ( struct kernfs_root * kf_root , int * flags , char * data )
2007-10-18 23:39:30 -07:00
{
int ret = 0 ;
2014-03-19 10:23:54 -04:00
struct cgroup_root * root = cgroup_root_from_kf ( kf_root ) ;
2007-10-18 23:39:30 -07:00
struct cgroup_sb_opts opts ;
2016-02-22 22:25:47 -05:00
u16 added_mask , removed_mask ;
2007-10-18 23:39:30 -07:00
2014-07-09 10:08:08 -04:00
if ( root = = & cgrp_dfl_root ) {
pr_err ( " remount is not allowed \n " ) ;
2013-04-14 20:15:26 -07:00
return - EINVAL ;
}
2016-03-03 09:58:01 -05:00
cgroup_lock_and_drain_offline ( & cgrp_dfl_root . cgrp ) ;
2007-10-18 23:39:30 -07:00
/* See what subsystems are wanted */
ret = parse_cgroupfs_options ( data , & opts ) ;
if ( ret )
goto out_unlock ;
2014-04-23 11:13:14 -04:00
if ( opts . subsys_mask ! = root - > subsys_mask | | opts . release_agent )
2014-04-25 18:28:03 -04:00
pr_warn ( " option changes via remount are deprecated (pid=%d comm=%s) \n " ,
2014-04-25 18:28:03 -04:00
task_tgid_nr ( current ) , current - > comm ) ;
2012-04-01 12:09:54 -07:00
2014-04-23 11:13:14 -04:00
added_mask = opts . subsys_mask & ~ root - > subsys_mask ;
removed_mask = root - > subsys_mask & ~ opts . subsys_mask ;
2012-08-23 16:53:29 -04:00
2010-03-10 15:22:09 -08:00
/* Don't allow flags or name to change at remount */
2014-07-09 10:08:07 -04:00
if ( ( opts . flags ^ root - > flags ) | |
2010-03-10 15:22:09 -08:00
( opts . name & & strcmp ( opts . name , root - > name ) ) ) {
2014-05-04 15:09:13 -04:00
pr_err ( " option or name mismatch, new: 0x%x \" %s \" , old: 0x%x \" %s \" \n " ,
2014-07-09 10:08:07 -04:00
opts . flags , opts . name ? : " " , root - > flags , root - > name ) ;
2009-09-23 15:56:19 -07:00
ret = - EINVAL ;
goto out_unlock ;
}
2013-06-28 17:07:30 -07:00
/* remounting is not allowed for populated hierarchies */
2014-05-16 13:22:48 -04:00
if ( ! list_empty ( & root - > cgrp . self . children ) ) {
2013-06-28 17:07:30 -07:00
ret = - EBUSY ;
2009-04-02 16:57:30 -07:00
goto out_unlock ;
2010-03-10 15:22:09 -08:00
}
2007-10-18 23:39:30 -07:00
2014-03-19 10:23:54 -04:00
ret = rebind_subsystems ( root , added_mask ) ;
2013-06-28 17:07:30 -07:00
if ( ret )
2009-04-02 16:57:30 -07:00
goto out_unlock ;
2007-10-18 23:39:30 -07:00
2016-03-03 09:58:01 -05:00
WARN_ON ( rebind_subsystems ( & cgrp_dfl_root , removed_mask ) ) ;
2014-03-19 10:23:54 -04:00
2014-02-08 10:36:58 -05:00
if ( opts . release_agent ) {
spin_lock ( & release_agent_path_lock ) ;
2007-10-18 23:39:38 -07:00
strcpy ( root - > release_agent_path , opts . release_agent ) ;
2014-02-08 10:36:58 -05:00
spin_unlock ( & release_agent_path_lock ) ;
}
2016-08-10 11:23:44 -04:00
trace_cgroup_remount ( root ) ;
2007-10-18 23:39:30 -07:00
out_unlock :
2009-04-02 16:57:27 -07:00
kfree ( opts . release_agent ) ;
2009-09-23 15:56:19 -07:00
kfree ( opts . name ) ;
2007-10-18 23:39:30 -07:00
mutex_unlock ( & cgroup_mutex ) ;
return ret ;
}
2014-02-13 06:58:39 -05:00
/*
* To reduce the fork() overhead for systems that are not actually using
* their cgroups capability, we don't maintain the lists running through
* each css_set to its tasks until we see the list actually used - in other
* words after the first mount.
*/
static bool use_task_css_set_links __read_mostly ;
static void cgroup_enable_task_cg_lists ( void )
{
struct task_struct * p , * g ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-13 06:58:39 -05:00
if ( use_task_css_set_links )
goto out_unlock ;
use_task_css_set_links = true ;
/*
* We need tasklist_lock because RCU is not safe against
* while_each_thread(). Besides, a forking task that has passed
* cgroup_post_fork() without seeing use_task_css_set_links = 1
* is not guaranteed to have its child immediately visible in the
* tasklist if we walk through it with RCU.
*/
read_lock ( & tasklist_lock ) ;
do_each_thread ( g , p ) {
WARN_ON_ONCE ( ! list_empty ( & p - > cg_list ) | |
task_css_set ( p ) ! = & init_css_set ) ;
/*
* We should check if the process is exiting, otherwise
* it will race with cgroup_exit() in that the list
* entry won't be deleted though the process has exited.
2014-02-25 09:56:49 -05:00
* Do it while holding siglock so that we don't end up
* racing against cgroup_exit().
2016-06-22 17:28:41 -03:00
*
* Interrupts were already disabled while acquiring
* the css_set_lock, so we do not need to disable it
* again when acquiring the sighand->siglock here.
2014-02-13 06:58:39 -05:00
*/
2016-06-22 17:28:41 -03:00
spin_lock ( & p - > sighand - > siglock ) ;
2014-02-25 10:04:03 -05:00
if ( ! ( p - > flags & PF_EXITING ) ) {
struct css_set * cset = task_css_set ( p ) ;
2015-10-15 16:41:49 -04:00
if ( ! css_set_populated ( cset ) )
css_set_update_populated ( cset , true ) ;
2015-10-15 16:41:51 -04:00
list_add_tail ( & p - > cg_list , & cset - > tasks ) ;
2014-02-25 10:04:03 -05:00
get_css_set ( cset ) ;
}
2016-06-22 17:28:41 -03:00
spin_unlock ( & p - > sighand - > siglock ) ;
2014-02-13 06:58:39 -05:00
} while_each_thread ( g , p ) ;
read_unlock ( & tasklist_lock ) ;
out_unlock :
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-02-13 06:58:39 -05:00
}
2007-10-18 23:39:30 -07:00
2008-10-18 20:28:04 -07:00
static void init_cgroup_housekeeping ( struct cgroup * cgrp )
{
2014-04-23 11:13:15 -04:00
struct cgroup_subsys * ss ;
int ssid ;
2014-05-16 13:22:48 -04:00
INIT_LIST_HEAD ( & cgrp - > self . sibling ) ;
INIT_LIST_HEAD ( & cgrp - > self . children ) ;
2013-06-12 21:04:50 -07:00
INIT_LIST_HEAD ( & cgrp - > cset_links ) ;
2009-09-23 15:56:27 -07:00
INIT_LIST_HEAD ( & cgrp - > pidlists ) ;
mutex_init ( & cgrp - > pidlist_mutex ) ;
2014-05-14 09:15:00 -04:00
cgrp - > self . cgroup = cgrp ;
2014-05-16 13:22:51 -04:00
cgrp - > self . flags | = CSS_ONLINE ;
2014-04-23 11:13:15 -04:00
for_each_subsys ( ss , ssid )
INIT_LIST_HEAD ( & cgrp - > e_csets [ ssid ] ) ;
2014-04-23 11:13:16 -04:00
init_waitqueue_head ( & cgrp - > offline_waitq ) ;
2014-09-18 16:06:19 +08:00
INIT_WORK ( & cgrp - > release_agent_work , cgroup_release_agent ) ;
2008-10-18 20:28:04 -07:00
}
2009-09-23 15:56:19 -07:00
2014-03-19 10:23:54 -04:00
static void init_cgroup_root ( struct cgroup_root * root ,
2014-03-19 10:23:53 -04:00
struct cgroup_sb_opts * opts )
2007-10-18 23:39:30 -07:00
{
2014-03-19 10:23:54 -04:00
struct cgroup * cgrp = & root - > cgrp ;
2012-04-01 12:09:54 -07:00
2007-10-18 23:39:30 -07:00
INIT_LIST_HEAD ( & root - > root_list ) ;
2014-02-12 09:29:50 -05:00
atomic_set ( & root - > nr_cgrps , 1 ) ;
2007-10-18 23:40:44 -07:00
cgrp - > root = root ;
2008-10-18 20:28:04 -07:00
init_cgroup_housekeeping ( cgrp ) ;
2013-07-31 09:50:50 +08:00
idr_init ( & root - > cgroup_idr ) ;
2007-10-18 23:39:30 -07:00
2009-09-23 15:56:19 -07:00
root - > flags = opts - > flags ;
if ( opts - > release_agent )
strcpy ( root - > release_agent_path , opts - > release_agent ) ;
if ( opts - > name )
strcpy ( root - > name , opts - > name ) ;
2012-11-19 08:13:38 -08:00
if ( opts - > cpuset_clone_children )
2014-03-19 10:23:54 -04:00
set_bit ( CGRP_CPUSET_CLONE_CHILDREN , & root - > cgrp . flags ) ;
2009-09-23 15:56:19 -07:00
}
2016-02-22 22:25:47 -05:00
static int cgroup_setup_root ( struct cgroup_root * root , u16 ss_mask )
2009-09-23 15:56:23 -07:00
{
2014-02-11 11:52:48 -05:00
LIST_HEAD ( tmp_links ) ;
2014-03-19 10:23:54 -04:00
struct cgroup * root_cgrp = & root - > cgrp ;
2014-02-11 11:52:48 -05:00
struct css_set * cset ;
int i , ret ;
2009-09-23 15:56:23 -07:00
2014-02-11 11:52:48 -05:00
lockdep_assert_held ( & cgroup_mutex ) ;
2009-09-23 15:56:23 -07:00
2015-08-03 15:32:26 +03:00
ret = cgroup_idr_alloc ( & root - > cgroup_idr , root_cgrp , 1 , 2 , GFP_KERNEL ) ;
2014-02-11 11:52:48 -05:00
if ( ret < 0 )
2014-02-11 11:52:49 -05:00
goto out ;
2014-02-11 11:52:48 -05:00
root_cgrp - > id = ret ;
2015-11-20 15:55:52 -05:00
root_cgrp - > ancestor_ids [ 0 ] = ret ;
2009-09-23 15:56:19 -07:00
2014-09-24 13:31:50 -04:00
ret = percpu_ref_init ( & root_cgrp - > self . refcnt , css_release , 0 ,
GFP_KERNEL ) ;
2014-05-14 09:15:02 -04:00
if ( ret )
goto out ;
2014-02-11 11:52:48 -05:00
/*
2015-10-15 16:41:53 -04:00
* We're accessing css_set_count without locking css_set_lock here,
2014-02-11 11:52:48 -05:00
* but that's OK - it can only be increased by someone holding
2016-03-03 09:58:01 -05:00
* cgroup_lock, and that's us. Later rebinding may disable
* controllers on the default hierarchy and thus create new csets,
* which can't be more than the existing ones. Allocate 2x.
2014-02-11 11:52:48 -05:00
*/
2016-03-03 09:58:01 -05:00
ret = allocate_cgrp_cset_links ( 2 * css_set_count , & tmp_links ) ;
2007-10-18 23:39:30 -07:00
if ( ret )
2014-05-14 09:15:02 -04:00
goto cancel_ref ;
2007-10-18 23:39:30 -07:00
2014-03-19 10:23:53 -04:00
ret = cgroup_init_root_id ( root ) ;
2014-02-11 11:52:48 -05:00
if ( ret )
2014-05-14 09:15:02 -04:00
goto cancel_ref ;
2007-10-18 23:39:30 -07:00
2014-02-11 11:52:49 -05:00
root - > kf_root = kernfs_create_root ( & cgroup_kf_syscall_ops ,
KERNFS_ROOT_CREATE_DEACTIVATED ,
root_cgrp ) ;
if ( IS_ERR ( root - > kf_root ) ) {
ret = PTR_ERR ( root - > kf_root ) ;
goto exit_root_id ;
}
root_cgrp - > kn = root - > kf_root - > kn ;
2007-10-18 23:39:30 -07:00
2016-03-03 09:58:01 -05:00
ret = css_populate_dir ( & root_cgrp - > self ) ;
2014-02-11 11:52:48 -05:00
if ( ret )
2014-02-11 11:52:49 -05:00
goto destroy_root ;
2007-10-18 23:39:30 -07:00
2014-03-19 10:23:54 -04:00
ret = rebind_subsystems ( root , ss_mask ) ;
2014-02-11 11:52:48 -05:00
if ( ret )
2014-02-11 11:52:49 -05:00
goto destroy_root ;
2010-12-21 13:29:29 -05:00
2016-08-10 11:23:44 -04:00
trace_cgroup_setup_root ( root ) ;
2014-02-11 11:52:48 -05:00
/*
* There must be no failure case after here, since rebinding takes
* care of subsystems' refcounts, which are explicitly dropped in
* the failure exit path.
*/
list_add ( & root - > root_list , & cgroup_roots ) ;
cgroup_root_count + + ;
2007-10-18 23:39:30 -07:00
2014-02-11 11:52:48 -05:00
/*
2014-03-19 10:23:54 -04:00
* Link the root cgroup in this hierarchy into all the css_set
2014-02-11 11:52:48 -05:00
* objects.
*/
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-10-15 16:41:49 -04:00
hash_for_each ( css_set_table , i , cset , hlist ) {
2014-02-11 11:52:48 -05:00
link_css_set ( & tmp_links , cset , root_cgrp ) ;
2015-10-15 16:41:49 -04:00
if ( css_set_populated ( cset ) )
cgroup_update_populated ( root_cgrp , true ) ;
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:30 -07:00
2014-05-16 13:22:48 -04:00
BUG_ON ( ! list_empty ( & root_cgrp - > self . children ) ) ;
2014-02-12 09:29:50 -05:00
BUG_ON ( atomic_read ( & root - > nr_cgrps ) ! = 1 ) ;
2014-02-11 11:52:48 -05:00
2014-02-11 11:52:49 -05:00
kernfs_activate ( root_cgrp - > kn ) ;
2014-02-11 11:52:48 -05:00
ret = 0 ;
2014-02-11 11:52:49 -05:00
goto out ;
2014-02-11 11:52:48 -05:00
2014-02-11 11:52:49 -05:00
destroy_root :
kernfs_destroy_root ( root - > kf_root ) ;
root - > kf_root = NULL ;
exit_root_id :
2014-02-11 11:52:48 -05:00
cgroup_exit_root_id ( root ) ;
2014-05-14 09:15:02 -04:00
cancel_ref :
2014-06-28 08:10:14 -04:00
percpu_ref_exit ( & root_cgrp - > self . refcnt ) ;
2014-02-11 11:52:49 -05:00
out :
2014-02-11 11:52:48 -05:00
free_cgrp_cset_links ( & tmp_links ) ;
return ret ;
2007-10-18 23:39:30 -07:00
}
2010-07-26 13:23:11 +04:00
static struct dentry * cgroup_mount ( struct file_system_type * fs_type ,
2007-10-18 23:39:30 -07:00
int flags , const char * unused_dev_name ,
2010-07-26 13:23:11 +04:00
void * data )
2007-10-18 23:39:30 -07:00
{
2015-11-16 11:13:34 -05:00
bool is_v2 = fs_type = = & cgroup2_fs_type ;
2014-06-30 11:50:59 +08:00
struct super_block * pinned_sb = NULL ;
2016-01-29 02:54:09 -06:00
struct cgroup_namespace * ns = current - > nsproxy - > cgroup_ns ;
2014-06-30 11:49:58 +08:00
struct cgroup_subsys * ss ;
2014-03-19 10:23:54 -04:00
struct cgroup_root * root ;
2007-10-18 23:39:30 -07:00
struct cgroup_sb_opts opts ;
2014-02-11 11:52:49 -05:00
struct dentry * dentry ;
2014-02-11 11:52:48 -05:00
int ret ;
2014-06-30 11:49:58 +08:00
int i ;
2014-04-04 17:14:41 +08:00
bool new_sb ;
2009-09-23 15:56:19 -07:00
2016-01-29 02:54:09 -06:00
get_cgroup_ns ( ns ) ;
/* Check if the caller has permission to mount. */
if ( ! ns_capable ( ns - > user_ns , CAP_SYS_ADMIN ) ) {
put_cgroup_ns ( ns ) ;
return ERR_PTR ( - EPERM ) ;
}
2009-09-23 15:56:19 -07:00
/*
2014-02-13 06:58:38 -05:00
* The first time anyone tries to mount a cgroup, enable the list
* linking each css_set to its tasks and fix up all existing tasks.
2009-09-23 15:56:19 -07:00
*/
2014-02-13 06:58:38 -05:00
if ( ! use_task_css_set_links )
cgroup_enable_task_cg_lists ( ) ;
2014-04-17 13:53:08 +08:00
2015-11-16 11:13:34 -05:00
if ( is_v2 ) {
if ( data ) {
pr_err ( " cgroup2: unknown option \" %s \" \n " , ( char * ) data ) ;
2016-01-29 02:54:09 -06:00
put_cgroup_ns ( ns ) ;
2015-11-16 11:13:34 -05:00
return ERR_PTR ( - EINVAL ) ;
}
2016-02-23 10:00:50 -05:00
cgrp_dfl_visible = true ;
2015-11-16 11:13:34 -05:00
root = & cgrp_dfl_root ;
cgroup_get ( & root - > cgrp ) ;
goto out_mount ;
}
2016-03-03 09:58:01 -05:00
cgroup_lock_and_drain_offline ( & cgrp_dfl_root . cgrp ) ;
2007-10-18 23:39:30 -07:00
/* First find the desired set of subsystems */
ret = parse_cgroupfs_options ( data , & opts ) ;
2009-09-23 15:56:19 -07:00
if ( ret )
2014-02-11 11:52:48 -05:00
goto out_unlock ;
2014-05-14 09:15:00 -04:00
2014-06-30 11:49:58 +08:00
/*
* Destruction of cgroup root is asynchronous, so subsystems may
* still be dying after the previous unmount. Let's drain the
* dying subsystems. We just need to ensure that the ones
* unmounted previously finish dying and don't care about new ones
* starting. Testing ref liveliness is good enough.
*/
for_each_subsys ( ss , i ) {
if ( ! ( opts . subsys_mask & ( 1 < < i ) ) | |
ss - > root = = & cgrp_dfl_root )
continue ;
if ( ! percpu_ref_tryget_live ( & ss - > root - > cgrp . self . refcnt ) ) {
mutex_unlock ( & cgroup_mutex ) ;
msleep ( 10 ) ;
ret = restart_syscall ( ) ;
goto out_free ;
}
cgroup_put ( & ss - > root - > cgrp ) ;
}
2014-03-19 10:23:53 -04:00
for_each_root ( root ) {
2014-02-11 11:52:49 -05:00
bool name_match = false ;
2007-10-18 23:39:30 -07:00
2014-03-19 10:23:54 -04:00
if ( root = = & cgrp_dfl_root )
2014-03-19 10:23:53 -04:00
continue ;
2009-09-23 15:56:19 -07:00
2007-10-18 23:39:36 -07:00
/*
2014-02-11 11:52:49 -05:00
* If we asked for a name then it must match. Also, if
* name matches but sybsys_mask doesn't, we should fail.
* Remember whether name matched.
2007-10-18 23:39:36 -07:00
*/
2014-02-11 11:52:49 -05:00
if ( opts . name ) {
if ( strcmp ( opts . name , root - > name ) )
continue ;
name_match = true ;
}
2013-06-28 17:07:30 -07:00
/*
2014-02-11 11:52:49 -05:00
* If we asked for subsystems (or explicitly for no
* subsystems) then they must match.
2013-06-28 17:07:30 -07:00
*/
2014-02-11 11:52:49 -05:00
if ( ( opts . subsys_mask | | opts . none ) & &
2014-04-23 11:13:14 -04:00
( opts . subsys_mask ! = root - > subsys_mask ) ) {
2014-02-11 11:52:49 -05:00
if ( ! name_match )
continue ;
ret = - EBUSY ;
goto out_unlock ;
}
2013-04-14 20:15:26 -07:00
2014-07-09 10:08:08 -04:00
if ( root - > flags ^ opts . flags )
pr_warn ( " new mount options do not match the existing superblock, will be ignored \n " ) ;
2014-02-11 11:52:49 -05:00
2014-02-12 09:29:50 -05:00
/*
2014-06-30 11:50:59 +08:00
* We want to reuse @root whose lifetime is governed by its
* ->cgrp. Let's check whether @root is alive and keep it
* that way. As cgroup_kill_sb() can happen anytime, we
* want to block it by pinning the sb so that @root doesn't
* get killed before mount is complete.
*
* With the sb pinned, tryget_live can reliably indicate
* whether @root can be reused. If it's being killed,
* drain it. We can use wait_queue for the wait but this
* path is super cold. Let's just sleep a bit and retry.
2014-02-12 09:29:50 -05:00
*/
2014-06-30 11:50:59 +08:00
pinned_sb = kernfs_pin_sb ( root - > kf_root , NULL ) ;
if ( IS_ERR ( pinned_sb ) | |
! percpu_ref_tryget_live ( & root - > cgrp . self . refcnt ) ) {
2014-02-12 09:29:50 -05:00
mutex_unlock ( & cgroup_mutex ) ;
2014-06-30 11:50:59 +08:00
if ( ! IS_ERR_OR_NULL ( pinned_sb ) )
deactivate_super ( pinned_sb ) ;
2014-02-12 09:29:50 -05:00
msleep ( 10 ) ;
2014-05-14 09:15:00 -04:00
ret = restart_syscall ( ) ;
goto out_free ;
2014-02-12 09:29:50 -05:00
}
ret = 0 ;
2014-02-11 11:52:49 -05:00
goto out_unlock ;
2007-10-18 23:39:30 -07:00
}
2014-03-19 10:23:53 -04:00
/*
* No such thing, create a new one. name= matching without subsys
* specification is allowed for already existing hierarchies but we
* can't create new one without subsys specification.
*/
if ( ! opts . subsys_mask & & ! opts . none ) {
ret = - EINVAL ;
2014-02-11 11:52:49 -05:00
goto out_unlock ;
}
2007-10-18 23:39:30 -07:00
2016-07-15 06:36:44 -05:00
/* Hierarchies may only be created in the initial cgroup namespace. */
if ( ns ! = & init_cgroup_ns ) {
2016-01-29 02:54:09 -06:00
ret = - EPERM ;
goto out_unlock ;
}
2014-03-19 10:23:53 -04:00
root = kzalloc ( sizeof ( * root ) , GFP_KERNEL ) ;
if ( ! root ) {
ret = - ENOMEM ;
goto out_unlock ;
}
init_cgroup_root ( root , & opts ) ;
2014-02-13 06:58:38 -05:00
ret = cgroup_setup_root ( root , opts . subsys_mask ) ;
2014-02-11 11:52:49 -05:00
if ( ret )
cgroup_free_root ( root ) ;
2014-02-11 11:52:48 -05:00
out_unlock :
2011-12-12 18:12:21 -08:00
mutex_unlock ( & cgroup_mutex ) ;
2014-05-14 09:15:00 -04:00
out_free :
2009-09-23 15:56:19 -07:00
kfree ( opts . release_agent ) ;
kfree ( opts . name ) ;
2014-02-11 11:52:48 -05:00
2016-01-29 02:54:09 -06:00
if ( ret ) {
put_cgroup_ns ( ns ) ;
2014-02-11 11:52:48 -05:00
return ERR_PTR ( ret ) ;
2016-01-29 02:54:09 -06:00
}
2015-11-16 11:13:34 -05:00
out_mount :
2014-04-26 15:40:28 +08:00
dentry = kernfs_mount ( fs_type , flags , root - > kf_root ,
2015-11-16 11:13:34 -05:00
is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC ,
& new_sb ) ;
2016-01-29 02:54:09 -06:00
/*
* In non-init cgroup namespace, instead of root cgroup's
* dentry, we return the dentry corresponding to the
* cgroupns->root_cgrp.
*/
if ( ! IS_ERR ( dentry ) & & ns ! = & init_cgroup_ns ) {
struct dentry * nsdentry ;
struct cgroup * cgrp ;
mutex_lock ( & cgroup_mutex ) ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-01-29 02:54:09 -06:00
cgrp = cset_cgroup_from_root ( ns - > root_cset , root ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2016-01-29 02:54:09 -06:00
mutex_unlock ( & cgroup_mutex ) ;
nsdentry = kernfs_node_dentry ( cgrp - > kn , dentry - > d_sb ) ;
dput ( dentry ) ;
dentry = nsdentry ;
}
2014-04-04 17:14:41 +08:00
if ( IS_ERR ( dentry ) | | ! new_sb )
2014-03-19 10:23:54 -04:00
cgroup_put ( & root - > cgrp ) ;
2014-06-30 11:50:59 +08:00
/*
* If @pinned_sb, we're reusing an existing root and holding an
* extra ref on its sb. Mount is complete. Put the extra ref.
*/
if ( pinned_sb ) {
WARN_ON ( new_sb ) ;
deactivate_super ( pinned_sb ) ;
}
2016-01-29 02:54:09 -06:00
put_cgroup_ns ( ns ) ;
2014-02-11 11:52:49 -05:00
return dentry ;
2007-10-18 23:39:30 -07:00
}
2014-01-18 16:56:47 +09:00
static void cgroup_kill_sb ( struct super_block * sb )
{
2014-02-11 11:52:49 -05:00
struct kernfs_root * kf_root = kernfs_root_from_sb ( sb ) ;
2014-03-19 10:23:54 -04:00
struct cgroup_root * root = cgroup_root_from_kf ( kf_root ) ;
2007-10-18 23:39:30 -07:00
2014-05-14 09:15:02 -04:00
/*
* If @root doesn't have any mounts or children, start killing it.
* This prevents new mounts by disabling percpu_ref_tryget_live().
* cgroup_mount() may wait for @root's release.
2014-06-04 16:48:15 +08:00
*
* And don't kill the default root.
2014-05-14 09:15:02 -04:00
*/
2015-01-22 10:19:43 -05:00
if ( ! list_empty ( & root - > cgrp . self . children ) | |
2014-06-04 16:48:15 +08:00
root = = & cgrp_dfl_root )
2014-05-14 09:15:02 -04:00
cgroup_put ( & root - > cgrp ) ;
else
percpu_ref_kill ( & root - > cgrp . self . refcnt ) ;
2014-02-11 11:52:49 -05:00
kernfs_kill_sb ( sb ) ;
2007-10-18 23:39:30 -07:00
}
static struct file_system_type cgroup_fs_type = {
. name = " cgroup " ,
2010-07-26 13:23:11 +04:00
. mount = cgroup_mount ,
2007-10-18 23:39:30 -07:00
. kill_sb = cgroup_kill_sb ,
2016-01-29 02:54:11 -06:00
. fs_flags = FS_USERNS_MOUNT ,
2007-10-18 23:39:30 -07:00
} ;
2015-11-16 11:13:34 -05:00
static struct file_system_type cgroup2_fs_type = {
. name = " cgroup2 " ,
. mount = cgroup_mount ,
. kill_sb = cgroup_kill_sb ,
2016-01-29 02:54:11 -06:00
. fs_flags = FS_USERNS_MOUNT ,
2015-11-16 11:13:34 -05:00
} ;
2016-08-10 11:23:44 -04:00
static int cgroup_path_ns_locked ( struct cgroup * cgrp , char * buf , size_t buflen ,
struct cgroup_namespace * ns )
2016-01-29 02:54:06 -06:00
{
struct cgroup * root = cset_cgroup_from_root ( ns - > root_cset , cgrp - > root ) ;
2016-08-10 11:23:44 -04:00
return kernfs_path_from_node ( cgrp - > kn , root - > kn , buf , buflen ) ;
2016-01-29 02:54:06 -06:00
}
2016-08-10 11:23:44 -04:00
int cgroup_path_ns ( struct cgroup * cgrp , char * buf , size_t buflen ,
struct cgroup_namespace * ns )
2016-01-29 02:54:06 -06:00
{
2016-08-10 11:23:44 -04:00
int ret ;
2016-01-29 02:54:06 -06:00
mutex_lock ( & cgroup_mutex ) ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-01-29 02:54:06 -06:00
ret = cgroup_path_ns_locked ( cgrp , buf , buflen , ns ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2016-01-29 02:54:06 -06:00
mutex_unlock ( & cgroup_mutex ) ;
return ret ;
}
EXPORT_SYMBOL_GPL ( cgroup_path_ns ) ;
2013-04-14 20:50:08 -07:00
/**
2013-07-11 16:34:48 -07:00
* task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
2013-04-14 20:50:08 -07:00
* @task: target task
* @buf: the buffer to write the path into
* @buflen: the length of the buffer
*
2013-07-11 16:34:48 -07:00
* Determine @task's cgroup on the first (the one with the lowest non-zero
* hierarchy_id) cgroup hierarchy and copy its path into @buf. This
* function grabs cgroup_mutex and shouldn't be used inside locks used by
* cgroup controller callbacks.
*
2014-02-12 09:29:50 -05:00
* Return value is the same as kernfs_path().
2013-04-14 20:50:08 -07:00
*/
2016-08-10 11:23:44 -04:00
int task_cgroup_path ( struct task_struct * task , char * buf , size_t buflen )
2013-04-14 20:50:08 -07:00
{
2014-03-19 10:23:54 -04:00
struct cgroup_root * root ;
2013-07-11 16:34:48 -07:00
struct cgroup * cgrp ;
2014-02-12 09:29:50 -05:00
int hierarchy_id = 1 ;
2016-08-10 11:23:44 -04:00
int ret ;
2013-04-14 20:50:08 -07:00
mutex_lock ( & cgroup_mutex ) ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2013-04-14 20:50:08 -07:00
2013-07-11 16:34:48 -07:00
root = idr_get_next ( & cgroup_hierarchy_idr , & hierarchy_id ) ;
2013-04-14 20:50:08 -07:00
if ( root ) {
cgrp = task_cgroup_from_root ( task , root ) ;
2016-08-10 11:23:44 -04:00
ret = cgroup_path_ns_locked ( cgrp , buf , buflen , & init_cgroup_ns ) ;
2013-07-11 16:34:48 -07:00
} else {
/* if no hierarchy exists, everyone is in "/" */
2016-08-10 11:23:44 -04:00
ret = strlcpy ( buf , " / " , buflen ) ;
2013-04-14 20:50:08 -07:00
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2013-04-14 20:50:08 -07:00
mutex_unlock ( & cgroup_mutex ) ;
2016-08-10 11:23:44 -04:00
return ret ;
2013-04-14 20:50:08 -07:00
}
2013-07-11 16:34:48 -07:00
EXPORT_SYMBOL_GPL ( task_cgroup_path ) ;
2013-04-14 20:50:08 -07:00
2014-02-25 10:04:01 -05:00
/* used to track tasks and other necessary states during migration */
2011-12-12 18:12:21 -08:00
struct cgroup_taskset {
2014-02-25 10:04:01 -05:00
/* the src and dst cset list running through cset->mg_node */
struct list_head src_csets ;
struct list_head dst_csets ;
2015-12-03 10:18:21 -05:00
/* the subsys currently being processed */
int ssid ;
2014-02-25 10:04:01 -05:00
/*
* Fields for cgroup_taskset_*() iteration.
*
* Before migration is committed, the target migration tasks are on
* ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
* the csets on ->dst_csets. ->csets point to either ->src_csets
* or ->dst_csets depending on whether migration is committed.
*
* ->cur_csets and ->cur_task point to the current task position
* during iteration.
*/
struct list_head * csets ;
struct css_set * cur_cset ;
struct task_struct * cur_task ;
2011-12-12 18:12:21 -08:00
} ;
2015-09-11 15:00:21 -04:00
# define CGROUP_TASKSET_INIT(tset) (struct cgroup_taskset){ \
.src_csets = LIST_HEAD_INIT(tset.src_csets), \
.dst_csets = LIST_HEAD_INIT(tset.dst_csets), \
.csets = &tset.src_csets, \
}
/**
* cgroup_taskset_add - try to add a migration target task to a taskset
* @task: target task
* @tset: target taskset
*
* Add @task, which is a migration target, to @tset. This function becomes
* noop if @task doesn't need to be migrated. @task's css_set should have
* been added as a migration source and @task->cg_list will be moved from
* the css_set's tasks list to mg_tasks one.
*/
static void cgroup_taskset_add ( struct task_struct * task ,
struct cgroup_taskset * tset )
{
struct css_set * cset ;
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2015-09-11 15:00:21 -04:00
/* @task either already exited or can't exit until the end */
if ( task - > flags & PF_EXITING )
return ;
/* leave @task alone if post_fork() hasn't linked it yet */
if ( list_empty ( & task - > cg_list ) )
return ;
cset = task_css_set ( task ) ;
if ( ! cset - > mg_src_cgrp )
return ;
list_move_tail ( & task - > cg_list , & cset - > mg_tasks ) ;
if ( list_empty ( & cset - > mg_node ) )
list_add_tail ( & cset - > mg_node , & tset - > src_csets ) ;
if ( list_empty ( & cset - > mg_dst_cset - > mg_node ) )
list_move_tail ( & cset - > mg_dst_cset - > mg_node ,
& tset - > dst_csets ) ;
}
2011-12-12 18:12:21 -08:00
/**
* cgroup_taskset_first - reset taskset and return the first task
* @tset: taskset of interest
2015-12-03 10:18:21 -05:00
* @dst_cssp: output variable for the destination css
2011-12-12 18:12:21 -08:00
*
* @tset iteration is initialized and the first task is returned.
*/
2015-12-03 10:18:21 -05:00
struct task_struct * cgroup_taskset_first ( struct cgroup_taskset * tset ,
struct cgroup_subsys_state * * dst_cssp )
2011-12-12 18:12:21 -08:00
{
2014-02-25 10:04:01 -05:00
tset - > cur_cset = list_first_entry ( tset - > csets , struct css_set , mg_node ) ;
tset - > cur_task = NULL ;
2015-12-03 10:18:21 -05:00
return cgroup_taskset_next ( tset , dst_cssp ) ;
2011-12-12 18:12:21 -08:00
}
/**
* cgroup_taskset_next - iterate to the next task in taskset
* @tset: taskset of interest
2015-12-03 10:18:21 -05:00
* @dst_cssp: output variable for the destination css
2011-12-12 18:12:21 -08:00
*
* Return the next task in @tset. Iteration must have been initialized
* with cgroup_taskset_first().
*/
2015-12-03 10:18:21 -05:00
struct task_struct * cgroup_taskset_next ( struct cgroup_taskset * tset ,
struct cgroup_subsys_state * * dst_cssp )
2011-12-12 18:12:21 -08:00
{
2014-02-25 10:04:01 -05:00
struct css_set * cset = tset - > cur_cset ;
struct task_struct * task = tset - > cur_task ;
2011-12-12 18:12:21 -08:00
2014-02-25 10:04:01 -05:00
while ( & cset - > mg_node ! = tset - > csets ) {
if ( ! task )
task = list_first_entry ( & cset - > mg_tasks ,
struct task_struct , cg_list ) ;
else
task = list_next_entry ( task , cg_list ) ;
2011-12-12 18:12:21 -08:00
2014-02-25 10:04:01 -05:00
if ( & task - > cg_list ! = & cset - > mg_tasks ) {
tset - > cur_cset = cset ;
tset - > cur_task = task ;
2015-12-03 10:18:21 -05:00
/*
* This function may be called both before and
* after cgroup_taskset_migrate(). The two cases
* can be distinguished by looking at whether @cset
* has its ->mg_dst_cset set.
*/
if ( cset - > mg_dst_cset )
* dst_cssp = cset - > mg_dst_cset - > subsys [ tset - > ssid ] ;
else
* dst_cssp = cset - > subsys [ tset - > ssid ] ;
2014-02-25 10:04:01 -05:00
return task ;
}
cset = list_next_entry ( cset , mg_node ) ;
task = NULL ;
}
return NULL ;
2011-12-12 18:12:21 -08:00
}
2015-09-11 15:00:21 -04:00
/**
2016-03-08 11:51:26 -05:00
* cgroup_taskset_migrate - migrate a taskset
2015-09-11 15:00:21 -04:00
* @tset: taget taskset
2016-03-08 11:51:26 -05:00
* @root: cgroup root the migration is taking place on
2015-09-11 15:00:21 -04:00
*
2016-03-08 11:51:26 -05:00
* Migrate tasks in @tset as setup by migration preparation functions.
* This function fails iff one of the ->can_attach callbacks fails and
* guarantees that either all or none of the tasks in @tset are migrated.
* @tset is consumed regardless of success.
2015-09-11 15:00:21 -04:00
*/
static int cgroup_taskset_migrate ( struct cgroup_taskset * tset ,
2016-03-08 11:51:26 -05:00
struct cgroup_root * root )
2015-09-11 15:00:21 -04:00
{
2016-03-08 11:51:26 -05:00
struct cgroup_subsys * ss ;
2015-09-11 15:00:21 -04:00
struct task_struct * task , * tmp_task ;
struct css_set * cset , * tmp_cset ;
2016-03-08 11:51:26 -05:00
int ssid , failed_ssid , ret ;
2015-09-11 15:00:21 -04:00
/* methods shouldn't be called if no task is actually migrating */
if ( list_empty ( & tset - > src_csets ) )
return 0 ;
/* check that we can legitimately attach to the cgroup */
2016-03-08 11:51:26 -05:00
do_each_subsys_mask ( ss , ssid , root - > subsys_mask ) {
if ( ss - > can_attach ) {
tset - > ssid = ssid ;
ret = ss - > can_attach ( tset ) ;
2015-09-11 15:00:21 -04:00
if ( ret ) {
2016-03-08 11:51:26 -05:00
failed_ssid = ssid ;
2015-09-11 15:00:21 -04:00
goto out_cancel_attach ;
}
}
2016-03-08 11:51:26 -05:00
} while_each_subsys_mask ( ) ;
2015-09-11 15:00:21 -04:00
/*
* Now that we're guaranteed success, proceed to move all tasks to
* the new cgroup. There are no failure cases after here, so this
* is the commit point.
*/
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-09-11 15:00:21 -04:00
list_for_each_entry ( cset , & tset - > src_csets , mg_node ) {
2015-10-15 16:41:52 -04:00
list_for_each_entry_safe ( task , tmp_task , & cset - > mg_tasks , cg_list ) {
struct css_set * from_cset = task_css_set ( task ) ;
struct css_set * to_cset = cset - > mg_dst_cset ;
get_css_set ( to_cset ) ;
css_set_move_task ( task , from_cset , to_cset , true ) ;
put_css_set_locked ( from_cset ) ;
}
2015-09-11 15:00:21 -04:00
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2015-09-11 15:00:21 -04:00
/*
* Migration is committed, all target tasks are now on dst_csets.
* Nothing is sensitive to fork() after this point. Notify
* controllers that migration is complete.
*/
tset - > csets = & tset - > dst_csets ;
2016-03-08 11:51:26 -05:00
do_each_subsys_mask ( ss , ssid , root - > subsys_mask ) {
if ( ss - > attach ) {
tset - > ssid = ssid ;
ss - > attach ( tset ) ;
2015-12-03 10:18:21 -05:00
}
2016-03-08 11:51:26 -05:00
} while_each_subsys_mask ( ) ;
2015-09-11 15:00:21 -04:00
ret = 0 ;
goto out_release_tset ;
out_cancel_attach :
2016-03-08 11:51:26 -05:00
do_each_subsys_mask ( ss , ssid , root - > subsys_mask ) {
if ( ssid = = failed_ssid )
2015-09-11 15:00:21 -04:00
break ;
2016-03-08 11:51:26 -05:00
if ( ss - > cancel_attach ) {
tset - > ssid = ssid ;
ss - > cancel_attach ( tset ) ;
2015-12-03 10:18:21 -05:00
}
2016-03-08 11:51:26 -05:00
} while_each_subsys_mask ( ) ;
2015-09-11 15:00:21 -04:00
out_release_tset :
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-09-11 15:00:21 -04:00
list_splice_init ( & tset - > dst_csets , & tset - > src_csets ) ;
list_for_each_entry_safe ( cset , tmp_cset , & tset - > src_csets , mg_node ) {
list_splice_tail_init ( & cset - > mg_tasks , & cset - > tasks ) ;
list_del_init ( & cset - > mg_node ) ;
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2015-09-11 15:00:21 -04:00
return ret ;
}
2016-03-08 11:51:25 -05:00
/**
* cgroup_may_migrate_to - verify whether a cgroup can be migration destination
* @dst_cgrp: destination cgroup to test
*
* On the default hierarchy, except for the root, subtree_control must be
* zero for migration destination cgroups with tasks so that child cgroups
* don't compete against tasks.
*/
static bool cgroup_may_migrate_to ( struct cgroup * dst_cgrp )
{
return ! cgroup_on_dfl ( dst_cgrp ) | | ! cgroup_parent ( dst_cgrp ) | |
! dst_cgrp - > subtree_control ;
}
2008-02-23 15:24:09 -08:00
/**
2014-02-25 10:04:03 -05:00
* cgroup_migrate_finish - cleanup after attach
* @preloaded_csets: list of preloaded css_sets
2011-05-26 16:25:20 -07:00
*
2014-02-25 10:04:03 -05:00
* Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
* those functions for details.
2011-05-26 16:25:20 -07:00
*/
2014-02-25 10:04:03 -05:00
static void cgroup_migrate_finish ( struct list_head * preloaded_csets )
2011-05-26 16:25:20 -07:00
{
2014-02-25 10:04:03 -05:00
struct css_set * cset , * tmp_cset ;
lockdep_assert_held ( & cgroup_mutex ) ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
list_for_each_entry_safe ( cset , tmp_cset , preloaded_csets , mg_preload_node ) {
cset - > mg_src_cgrp = NULL ;
2016-03-08 11:51:26 -05:00
cset - > mg_dst_cgrp = NULL ;
2014-02-25 10:04:03 -05:00
cset - > mg_dst_cset = NULL ;
list_del_init ( & cset - > mg_preload_node ) ;
2014-09-19 16:51:00 +08:00
put_css_set_locked ( cset ) ;
2014-02-25 10:04:03 -05:00
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
}
/**
* cgroup_migrate_add_src - add a migration source css_set
* @src_cset: the source css_set to add
* @dst_cgrp: the destination cgroup
* @preloaded_csets: list of preloaded css_sets
*
* Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
* @src_cset and add it to @preloaded_csets, which should later be cleaned
* up by cgroup_migrate_finish().
*
2015-09-16 12:53:17 -04:00
* This function may be called without holding cgroup_threadgroup_rwsem
* even if the target is a process. Threads may be created and destroyed
* but as long as cgroup_mutex is not dropped, no new css_set can be put
* into play and the preloaded css_sets are guaranteed to cover all
* migrations.
2014-02-25 10:04:03 -05:00
*/
static void cgroup_migrate_add_src ( struct css_set * src_cset ,
struct cgroup * dst_cgrp ,
struct list_head * preloaded_csets )
{
struct cgroup * src_cgrp ;
lockdep_assert_held ( & cgroup_mutex ) ;
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
2016-03-15 20:43:04 -04:00
/*
* If ->dead, @src_set is associated with one or more dead cgroups
* and doesn't contain any migratable tasks. Ignore it early so
* that the rest of migration path doesn't get confused by it.
*/
if ( src_cset - > dead )
return ;
2014-02-25 10:04:03 -05:00
src_cgrp = cset_cgroup_from_root ( src_cset , dst_cgrp - > root ) ;
if ( ! list_empty ( & src_cset - > mg_preload_node ) )
return ;
WARN_ON ( src_cset - > mg_src_cgrp ) ;
2016-03-08 11:51:26 -05:00
WARN_ON ( src_cset - > mg_dst_cgrp ) ;
2014-02-25 10:04:03 -05:00
WARN_ON ( ! list_empty ( & src_cset - > mg_tasks ) ) ;
WARN_ON ( ! list_empty ( & src_cset - > mg_node ) ) ;
src_cset - > mg_src_cgrp = src_cgrp ;
2016-03-08 11:51:26 -05:00
src_cset - > mg_dst_cgrp = dst_cgrp ;
2014-02-25 10:04:03 -05:00
get_css_set ( src_cset ) ;
list_add ( & src_cset - > mg_preload_node , preloaded_csets ) ;
}
/**
* cgroup_migrate_prepare_dst - prepare destination css_sets for migration
* @preloaded_csets: list of preloaded source css_sets
*
2016-03-08 11:51:26 -05:00
* Tasks are about to be moved and all the source css_sets have been
* preloaded to @preloaded_csets. This function looks up and pins all
* destination css_sets, links each to its source, and append them to
* @preloaded_csets.
2014-02-25 10:04:03 -05:00
*
* This function must be called after cgroup_migrate_add_src() has been
* called on each migration source css_set. After migration is performed
* using cgroup_migrate(), cgroup_migrate_finish() must be called on
* @preloaded_csets.
*/
2016-03-08 11:51:26 -05:00
static int cgroup_migrate_prepare_dst ( struct list_head * preloaded_csets )
2014-02-25 10:04:03 -05:00
{
LIST_HEAD ( csets ) ;
2014-04-23 11:13:16 -04:00
struct css_set * src_cset , * tmp_cset ;
2014-02-25 10:04:03 -05:00
lockdep_assert_held ( & cgroup_mutex ) ;
/* look up the dst cset for each src cset and link it to src */
2014-04-23 11:13:16 -04:00
list_for_each_entry_safe ( src_cset , tmp_cset , preloaded_csets , mg_preload_node ) {
2014-02-25 10:04:03 -05:00
struct css_set * dst_cset ;
2016-03-08 11:51:26 -05:00
dst_cset = find_css_set ( src_cset , src_cset - > mg_dst_cgrp ) ;
2014-02-25 10:04:03 -05:00
if ( ! dst_cset )
goto err ;
WARN_ON_ONCE ( src_cset - > mg_dst_cset | | dst_cset - > mg_dst_cset ) ;
2014-04-23 11:13:16 -04:00
/*
* If src cset equals dst, it's noop. Drop the src.
* cgroup_migrate() will skip the cset too. Note that we
* can't handle src == dst as some nodes are used by both.
*/
if ( src_cset = = dst_cset ) {
src_cset - > mg_src_cgrp = NULL ;
2016-03-08 11:51:26 -05:00
src_cset - > mg_dst_cgrp = NULL ;
2014-04-23 11:13:16 -04:00
list_del_init ( & src_cset - > mg_preload_node ) ;
2014-09-19 16:51:00 +08:00
put_css_set ( src_cset ) ;
put_css_set ( dst_cset ) ;
2014-04-23 11:13:16 -04:00
continue ;
}
2014-02-25 10:04:03 -05:00
src_cset - > mg_dst_cset = dst_cset ;
if ( list_empty ( & dst_cset - > mg_preload_node ) )
list_add ( & dst_cset - > mg_preload_node , & csets ) ;
else
2014-09-19 16:51:00 +08:00
put_css_set ( dst_cset ) ;
2014-02-25 10:04:03 -05:00
}
2014-04-23 11:13:16 -04:00
list_splice_tail ( & csets , preloaded_csets ) ;
2014-02-25 10:04:03 -05:00
return 0 ;
err :
cgroup_migrate_finish ( & csets ) ;
return - ENOMEM ;
}
/**
* cgroup_migrate - migrate a process or task to a cgroup
* @leader: the leader of the process or the task to migrate
* @threadgroup: whether @leader points to the whole process or a single task
2016-03-08 11:51:26 -05:00
* @root: cgroup root migration is taking place on
2014-02-25 10:04:03 -05:00
*
2016-03-08 11:51:26 -05:00
* Migrate a process or task denoted by @leader. If migrating a process,
* the caller must be holding cgroup_threadgroup_rwsem. The caller is also
* responsible for invoking cgroup_migrate_add_src() and
2014-02-25 10:04:03 -05:00
* cgroup_migrate_prepare_dst() on the targets before invoking this
* function and following up with cgroup_migrate_finish().
*
* As long as a controller's ->can_attach() doesn't fail, this function is
* guaranteed to succeed. This means that, excluding ->can_attach()
* failure, when migrating multiple targets, the success or failure can be
* decided for all targets by invoking group_migrate_prepare_dst() before
* actually starting migrating.
*/
2015-09-11 15:00:20 -04:00
static int cgroup_migrate ( struct task_struct * leader , bool threadgroup ,
2016-03-08 11:51:26 -05:00
struct cgroup_root * root )
2011-05-26 16:25:20 -07:00
{
2015-09-11 15:00:21 -04:00
struct cgroup_taskset tset = CGROUP_TASKSET_INIT ( tset ) ;
struct task_struct * task ;
2011-05-26 16:25:20 -07:00
2012-01-03 21:18:31 -08:00
/*
* Prevent freeing of tasks while we take a snapshot. Tasks that are
* already PF_EXITING could be freed from underneath us unless we
* take an rcu_read_lock.
*/
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2012-01-03 21:18:31 -08:00
rcu_read_lock ( ) ;
2014-02-13 06:58:43 -05:00
task = leader ;
2011-05-26 16:25:20 -07:00
do {
2015-09-11 15:00:21 -04:00
cgroup_taskset_add ( task , & tset ) ;
2013-03-13 09:17:09 +08:00
if ( ! threadgroup )
break ;
2014-02-13 06:58:43 -05:00
} while_each_thread ( leader , task ) ;
2012-01-03 21:18:31 -08:00
rcu_read_unlock ( ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2011-05-26 16:25:20 -07:00
2016-03-08 11:51:26 -05:00
return cgroup_taskset_migrate ( & tset , root ) ;
2011-05-26 16:25:20 -07:00
}
2014-02-25 10:04:03 -05:00
/**
* cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
* @dst_cgrp: the cgroup to attach to
* @leader: the task or the leader of the threadgroup to be attached
* @threadgroup: attach the whole threadgroup?
*
2015-09-16 12:53:17 -04:00
* Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2014-02-25 10:04:03 -05:00
*/
static int cgroup_attach_task ( struct cgroup * dst_cgrp ,
struct task_struct * leader , bool threadgroup )
{
LIST_HEAD ( preloaded_csets ) ;
struct task_struct * task ;
int ret ;
2016-03-08 11:51:25 -05:00
if ( ! cgroup_may_migrate_to ( dst_cgrp ) )
return - EBUSY ;
2014-02-25 10:04:03 -05:00
/* look up all src csets */
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
rcu_read_lock ( ) ;
task = leader ;
do {
cgroup_migrate_add_src ( task_css_set ( task ) , dst_cgrp ,
& preloaded_csets ) ;
if ( ! threadgroup )
break ;
} while_each_thread ( leader , task ) ;
rcu_read_unlock ( ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
/* prepare dst csets and commit */
2016-03-08 11:51:26 -05:00
ret = cgroup_migrate_prepare_dst ( & preloaded_csets ) ;
2014-02-25 10:04:03 -05:00
if ( ! ret )
2016-03-08 11:51:26 -05:00
ret = cgroup_migrate ( leader , threadgroup , dst_cgrp - > root ) ;
2014-02-25 10:04:03 -05:00
cgroup_migrate_finish ( & preloaded_csets ) ;
2016-08-10 11:23:44 -04:00
if ( ! ret )
trace_cgroup_attach_task ( dst_cgrp , leader , threadgroup ) ;
2014-02-25 10:04:03 -05:00
return ret ;
2011-05-26 16:25:20 -07:00
}
2015-06-18 16:54:28 -04:00
static int cgroup_procs_write_permission ( struct task_struct * task ,
struct cgroup * dst_cgrp ,
struct kernfs_open_file * of )
2015-06-18 16:54:28 -04:00
{
const struct cred * cred = current_cred ( ) ;
const struct cred * tcred = get_task_cred ( task ) ;
int ret = 0 ;
/*
* even if we're attaching all tasks in the thread group, we only
* need to check permissions on one of them.
*/
if ( ! uid_eq ( cred - > euid , GLOBAL_ROOT_UID ) & &
! uid_eq ( cred - > euid , tcred - > uid ) & &
! uid_eq ( cred - > euid , tcred - > suid ) )
ret = - EACCES ;
2015-06-18 16:54:28 -04:00
if ( ! ret & & cgroup_on_dfl ( dst_cgrp ) ) {
struct super_block * sb = of - > file - > f_path . dentry - > d_sb ;
struct cgroup * cgrp ;
struct inode * inode ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-06-18 16:54:28 -04:00
cgrp = task_cgroup_from_root ( task , & cgrp_dfl_root ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2015-06-18 16:54:28 -04:00
while ( ! cgroup_is_descendant ( dst_cgrp , cgrp ) )
cgrp = cgroup_parent ( cgrp ) ;
ret = - ENOMEM ;
2015-09-18 17:54:23 -04:00
inode = kernfs_get_inode ( sb , cgrp - > procs_file . kn ) ;
2015-06-18 16:54:28 -04:00
if ( inode ) {
ret = inode_permission ( inode , MAY_WRITE ) ;
iput ( inode ) ;
}
}
2015-06-18 16:54:28 -04:00
put_cred ( tcred ) ;
return ret ;
}
2011-05-26 16:25:20 -07:00
/*
* Find the task_struct of the task to attach by vpid and pass it along to the
2011-12-12 18:12:21 -08:00
* function to attach either it or all tasks in its threadgroup. Will lock
2014-02-25 10:04:03 -05:00
* cgroup_mutex and threadgroup.
2011-05-26 16:25:20 -07:00
*/
2014-05-13 12:16:22 -04:00
static ssize_t __cgroup_procs_write ( struct kernfs_open_file * of , char * buf ,
size_t nbytes , loff_t off , bool threadgroup )
2007-10-18 23:39:32 -07:00
{
struct task_struct * tsk ;
2016-04-21 19:06:48 -04:00
struct cgroup_subsys * ss ;
2014-05-13 12:19:23 -04:00
struct cgroup * cgrp ;
2014-05-13 12:16:22 -04:00
pid_t pid ;
2016-04-21 19:06:48 -04:00
int ssid , ret ;
2007-10-18 23:39:32 -07:00
2014-05-13 12:16:22 -04:00
if ( kstrtoint ( strstrip ( buf ) , 0 , & pid ) | | pid < 0 )
return - EINVAL ;
2016-03-03 09:58:00 -05:00
cgrp = cgroup_kn_lock_live ( of - > kn , false ) ;
2014-05-13 12:19:23 -04:00
if ( ! cgrp )
2011-05-26 16:25:20 -07:00
return - ENODEV ;
2015-09-16 13:03:02 -04:00
percpu_down_write ( & cgroup_threadgroup_rwsem ) ;
2012-01-03 21:18:30 -08:00
rcu_read_lock ( ) ;
2007-10-18 23:39:32 -07:00
if ( pid ) {
2008-02-07 00:14:47 -08:00
tsk = find_task_by_vpid ( pid ) ;
2011-05-26 16:25:20 -07:00
if ( ! tsk ) {
2014-01-18 16:56:47 +09:00
ret = - ESRCH ;
2015-09-16 13:03:02 -04:00
goto out_unlock_rcu ;
2007-10-18 23:39:32 -07:00
}
2015-06-18 16:54:28 -04:00
} else {
2012-01-03 21:18:30 -08:00
tsk = current ;
2015-06-18 16:54:28 -04:00
}
2011-12-12 18:12:21 -08:00
if ( threadgroup )
2012-01-03 21:18:30 -08:00
tsk = tsk - > group_leader ;
2012-04-21 09:13:46 +02:00
/*
2013-03-19 13:45:20 -07:00
* Workqueue threads may acquire PF_NO_SETAFFINITY and become
2012-04-21 09:13:46 +02:00
* trapped in a cpuset, or RT worker may be born in a cgroup
* with no rt_runtime allocated. Just say no.
*/
2013-03-19 13:45:20 -07:00
if ( tsk = = kthreadd_task | | ( tsk - > flags & PF_NO_SETAFFINITY ) ) {
2012-04-21 09:13:46 +02:00
ret = - EINVAL ;
2015-09-16 13:03:02 -04:00
goto out_unlock_rcu ;
2012-04-21 09:13:46 +02:00
}
2012-01-03 21:18:30 -08:00
get_task_struct ( tsk ) ;
rcu_read_unlock ( ) ;
2011-12-12 18:12:21 -08:00
2015-06-18 16:54:28 -04:00
ret = cgroup_procs_write_permission ( tsk , cgrp , of ) ;
2015-06-18 16:54:28 -04:00
if ( ! ret )
ret = cgroup_attach_task ( cgrp , tsk , threadgroup ) ;
2013-03-13 09:17:09 +08:00
2015-09-16 11:51:12 -04:00
put_task_struct ( tsk ) ;
2015-09-16 13:03:02 -04:00
goto out_unlock_threadgroup ;
out_unlock_rcu :
rcu_read_unlock ( ) ;
out_unlock_threadgroup :
percpu_up_write ( & cgroup_threadgroup_rwsem ) ;
2016-04-21 19:06:48 -04:00
for_each_subsys ( ss , ssid )
if ( ss - > post_attach )
ss - > post_attach ( ) ;
2014-05-13 12:19:23 -04:00
cgroup_kn_unlock ( of - > kn ) ;
2014-05-13 12:16:22 -04:00
return ret ? : nbytes ;
2007-10-18 23:39:32 -07:00
}
2013-04-07 09:29:51 -07:00
/**
* cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
* @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
int cgroup_attach_task_all ( struct task_struct * from , struct task_struct * tsk )
{
2014-03-19 10:23:54 -04:00
struct cgroup_root * root ;
2013-04-07 09:29:51 -07:00
int retval = 0 ;
2013-04-07 09:29:51 -07:00
mutex_lock ( & cgroup_mutex ) ;
2016-07-15 06:35:51 -05:00
percpu_down_write ( & cgroup_threadgroup_rwsem ) ;
2014-03-19 10:23:53 -04:00
for_each_root ( root ) {
2014-02-13 06:58:40 -05:00
struct cgroup * from_cgrp ;
2014-03-19 10:23:54 -04:00
if ( root = = & cgrp_dfl_root )
2014-03-19 10:23:53 -04:00
continue ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-13 06:58:40 -05:00
from_cgrp = task_cgroup_from_root ( from , root ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2013-04-07 09:29:51 -07:00
2013-07-31 16:18:36 +08:00
retval = cgroup_attach_task ( from_cgrp , tsk , false ) ;
2013-04-07 09:29:51 -07:00
if ( retval )
break ;
}
2016-07-15 06:35:51 -05:00
percpu_up_write ( & cgroup_threadgroup_rwsem ) ;
2013-04-07 09:29:51 -07:00
mutex_unlock ( & cgroup_mutex ) ;
2013-04-07 09:29:51 -07:00
return retval ;
}
EXPORT_SYMBOL_GPL ( cgroup_attach_task_all ) ;
2014-05-13 12:16:22 -04:00
static ssize_t cgroup_tasks_write ( struct kernfs_open_file * of ,
char * buf , size_t nbytes , loff_t off )
2011-05-26 16:25:20 -07:00
{
2014-05-13 12:16:22 -04:00
return __cgroup_procs_write ( of , buf , nbytes , off , false ) ;
2011-05-26 16:25:20 -07:00
}
2014-05-13 12:16:22 -04:00
static ssize_t cgroup_procs_write ( struct kernfs_open_file * of ,
char * buf , size_t nbytes , loff_t off )
2008-07-25 01:47:01 -07:00
{
2014-05-13 12:16:22 -04:00
return __cgroup_procs_write ( of , buf , nbytes , off , true ) ;
2008-07-25 01:47:01 -07:00
}
2014-05-13 12:16:21 -04:00
static ssize_t cgroup_release_agent_write ( struct kernfs_open_file * of ,
char * buf , size_t nbytes , loff_t off )
2008-07-25 01:46:59 -07:00
{
2014-05-13 12:19:23 -04:00
struct cgroup * cgrp ;
2014-02-11 11:52:48 -05:00
2014-05-13 12:19:23 -04:00
BUILD_BUG_ON ( sizeof ( cgrp - > root - > release_agent_path ) < PATH_MAX ) ;
2016-03-03 09:58:00 -05:00
cgrp = cgroup_kn_lock_live ( of - > kn , false ) ;
2014-05-13 12:19:23 -04:00
if ( ! cgrp )
2008-07-25 01:46:59 -07:00
return - ENODEV ;
2014-02-08 10:36:58 -05:00
spin_lock ( & release_agent_path_lock ) ;
2014-05-13 12:19:23 -04:00
strlcpy ( cgrp - > root - > release_agent_path , strstrip ( buf ) ,
sizeof ( cgrp - > root - > release_agent_path ) ) ;
2014-02-08 10:36:58 -05:00
spin_unlock ( & release_agent_path_lock ) ;
2014-05-13 12:19:23 -04:00
cgroup_kn_unlock ( of - > kn ) ;
2014-05-13 12:16:21 -04:00
return nbytes ;
2008-07-25 01:46:59 -07:00
}
2013-12-05 12:28:04 -05:00
static int cgroup_release_agent_show ( struct seq_file * seq , void * v )
2008-07-25 01:46:59 -07:00
{
2013-12-05 12:28:04 -05:00
struct cgroup * cgrp = seq_css ( seq ) - > cgroup ;
2013-08-08 20:11:24 -04:00
2014-05-13 12:11:00 -04:00
spin_lock ( & release_agent_path_lock ) ;
2008-07-25 01:46:59 -07:00
seq_puts ( seq , cgrp - > root - > release_agent_path ) ;
2014-05-13 12:11:00 -04:00
spin_unlock ( & release_agent_path_lock ) ;
2008-07-25 01:46:59 -07:00
seq_putc ( seq , ' \n ' ) ;
return 0 ;
}
2013-12-05 12:28:04 -05:00
static int cgroup_sane_behavior_show ( struct seq_file * seq , void * v )
2013-04-14 20:15:26 -07:00
{
2014-07-09 10:08:08 -04:00
seq_puts ( seq , " 0 \n " ) ;
2008-07-25 01:46:59 -07:00
return 0 ;
}
2016-02-22 22:25:47 -05:00
static void cgroup_print_ss_mask ( struct seq_file * seq , u16 ss_mask )
2014-04-23 11:13:16 -04:00
{
struct cgroup_subsys * ss ;
bool printed = false ;
int ssid ;
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , ssid , ss_mask ) {
2015-06-06 10:02:15 +10:00
if ( printed )
seq_putc ( seq , ' ' ) ;
seq_printf ( seq , " %s " , ss - > name ) ;
printed = true ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2014-04-23 11:13:16 -04:00
if ( printed )
seq_putc ( seq , ' \n ' ) ;
}
/* show controllers which are enabled from the parent */
static int cgroup_controllers_show ( struct seq_file * seq , void * v )
{
struct cgroup * cgrp = seq_css ( seq ) - > cgroup ;
2016-03-03 09:57:58 -05:00
cgroup_print_ss_mask ( seq , cgroup_control ( cgrp ) ) ;
2014-04-23 11:13:16 -04:00
return 0 ;
}
/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show ( struct seq_file * seq , void * v )
{
struct cgroup * cgrp = seq_css ( seq ) - > cgroup ;
2014-07-08 18:02:56 -04:00
cgroup_print_ss_mask ( seq , cgrp - > subtree_control ) ;
2014-04-23 11:13:16 -04:00
return 0 ;
}
/**
* cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
* @cgrp: root of the subtree to update csses for
*
2016-03-03 09:58:01 -05:00
* @cgrp's control masks have changed and its subtree's css associations
* need to be updated accordingly. This function looks up all css_sets
* which are attached to the subtree, creates the matching updated css_sets
* and migrates the tasks to the new ones.
2014-04-23 11:13:16 -04:00
*/
static int cgroup_update_dfl_csses ( struct cgroup * cgrp )
{
LIST_HEAD ( preloaded_csets ) ;
2015-09-11 15:00:22 -04:00
struct cgroup_taskset tset = CGROUP_TASKSET_INIT ( tset ) ;
2016-03-03 09:58:01 -05:00
struct cgroup_subsys_state * d_css ;
struct cgroup * dsct ;
2014-04-23 11:13:16 -04:00
struct css_set * src_cset ;
int ret ;
lockdep_assert_held ( & cgroup_mutex ) ;
2015-09-16 13:03:02 -04:00
percpu_down_write ( & cgroup_threadgroup_rwsem ) ;
2014-04-23 11:13:16 -04:00
/* look up all csses currently attached to @cgrp's subtree */
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-03-03 09:58:01 -05:00
cgroup_for_each_live_descendant_pre ( dsct , d_css , cgrp ) {
2014-04-23 11:13:16 -04:00
struct cgrp_cset_link * link ;
2016-03-03 09:58:01 -05:00
list_for_each_entry ( link , & dsct - > cset_links , cset_link )
2016-03-08 11:51:25 -05:00
cgroup_migrate_add_src ( link - > cset , dsct ,
2014-04-23 11:13:16 -04:00
& preloaded_csets ) ;
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-04-23 11:13:16 -04:00
/* NULL dst indicates self on default hierarchy */
2016-03-08 11:51:26 -05:00
ret = cgroup_migrate_prepare_dst ( & preloaded_csets ) ;
2014-04-23 11:13:16 -04:00
if ( ret )
goto out_finish ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-04-23 11:13:16 -04:00
list_for_each_entry ( src_cset , & preloaded_csets , mg_preload_node ) {
2015-09-11 15:00:22 -04:00
struct task_struct * task , * ntask ;
2014-04-23 11:13:16 -04:00
/* src_csets precede dst_csets, break on the first dst_cset */
if ( ! src_cset - > mg_src_cgrp )
break ;
2015-09-11 15:00:22 -04:00
/* all tasks in src_csets need to be migrated */
list_for_each_entry_safe ( task , ntask , & src_cset - > tasks , cg_list )
cgroup_taskset_add ( task , & tset ) ;
2014-04-23 11:13:16 -04:00
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-04-23 11:13:16 -04:00
2016-03-08 11:51:26 -05:00
ret = cgroup_taskset_migrate ( & tset , cgrp - > root ) ;
2014-04-23 11:13:16 -04:00
out_finish :
cgroup_migrate_finish ( & preloaded_csets ) ;
2015-09-16 13:03:02 -04:00
percpu_up_write ( & cgroup_threadgroup_rwsem ) ;
2014-04-23 11:13:16 -04:00
return ret ;
}
2016-03-03 09:57:59 -05:00
/**
2016-03-03 09:58:00 -05:00
* cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
2016-03-03 09:57:59 -05:00
* @cgrp: root of the target subtree
2016-03-03 09:57:59 -05:00
*
* Because css offlining is asynchronous, userland may try to re-enable a
2016-03-03 09:58:00 -05:00
* controller while the previous css is still around. This function grabs
* cgroup_mutex and drains the previous css instances of @cgrp's subtree.
2016-03-03 09:57:59 -05:00
*/
2016-03-03 09:58:00 -05:00
static void cgroup_lock_and_drain_offline ( struct cgroup * cgrp )
__acquires ( & cgroup_mutex )
2016-03-03 09:57:59 -05:00
{
struct cgroup * dsct ;
2016-03-03 09:57:59 -05:00
struct cgroup_subsys_state * d_css ;
2016-03-03 09:57:59 -05:00
struct cgroup_subsys * ss ;
int ssid ;
2016-03-03 09:58:00 -05:00
restart :
mutex_lock ( & cgroup_mutex ) ;
2016-03-03 09:57:59 -05:00
2016-03-03 09:57:59 -05:00
cgroup_for_each_live_descendant_post ( dsct , d_css , cgrp ) {
2016-03-03 09:57:59 -05:00
for_each_subsys ( ss , ssid ) {
struct cgroup_subsys_state * css = cgroup_css ( dsct , ss ) ;
DEFINE_WAIT ( wait ) ;
2016-03-03 09:57:59 -05:00
if ( ! css | | ! percpu_ref_is_dying ( & css - > refcnt ) )
2016-03-03 09:57:59 -05:00
continue ;
cgroup_get ( dsct ) ;
prepare_to_wait ( & dsct - > offline_waitq , & wait ,
TASK_UNINTERRUPTIBLE ) ;
mutex_unlock ( & cgroup_mutex ) ;
schedule ( ) ;
finish_wait ( & dsct - > offline_waitq , & wait ) ;
cgroup_put ( dsct ) ;
2016-03-03 09:58:00 -05:00
goto restart ;
2016-03-03 09:57:59 -05:00
}
}
}
2016-03-03 09:57:59 -05:00
/**
* cgroup_save_control - save control masks of a subtree
* @cgrp: root of the target subtree
*
* Save ->subtree_control and ->subtree_ss_mask to the respective old_
* prefixed fields for @cgrp's subtree including @cgrp itself.
*/
static void cgroup_save_control ( struct cgroup * cgrp )
{
struct cgroup * dsct ;
struct cgroup_subsys_state * d_css ;
cgroup_for_each_live_descendant_pre ( dsct , d_css , cgrp ) {
dsct - > old_subtree_control = dsct - > subtree_control ;
dsct - > old_subtree_ss_mask = dsct - > subtree_ss_mask ;
}
}
/**
* cgroup_propagate_control - refresh control masks of a subtree
* @cgrp: root of the target subtree
*
* For @cgrp and its subtree, ensure ->subtree_ss_mask matches
* ->subtree_control and propagate controller availability through the
* subtree so that descendants don't have unavailable controllers enabled.
*/
static void cgroup_propagate_control ( struct cgroup * cgrp )
{
struct cgroup * dsct ;
struct cgroup_subsys_state * d_css ;
cgroup_for_each_live_descendant_pre ( dsct , d_css , cgrp ) {
dsct - > subtree_control & = cgroup_control ( dsct ) ;
2016-03-03 09:58:01 -05:00
dsct - > subtree_ss_mask =
cgroup_calc_subtree_ss_mask ( dsct - > subtree_control ,
cgroup_ss_mask ( dsct ) ) ;
2016-03-03 09:57:59 -05:00
}
}
/**
* cgroup_restore_control - restore control masks of a subtree
* @cgrp: root of the target subtree
*
* Restore ->subtree_control and ->subtree_ss_mask from the respective old_
* prefixed fields for @cgrp's subtree including @cgrp itself.
*/
static void cgroup_restore_control ( struct cgroup * cgrp )
{
struct cgroup * dsct ;
struct cgroup_subsys_state * d_css ;
cgroup_for_each_live_descendant_post ( dsct , d_css , cgrp ) {
dsct - > subtree_control = dsct - > old_subtree_control ;
dsct - > subtree_ss_mask = dsct - > old_subtree_ss_mask ;
}
}
2016-03-08 11:51:26 -05:00
static bool css_visible ( struct cgroup_subsys_state * css )
{
struct cgroup_subsys * ss = css - > ss ;
struct cgroup * cgrp = css - > cgroup ;
if ( cgroup_control ( cgrp ) & ( 1 < < ss - > id ) )
return true ;
if ( ! ( cgroup_ss_mask ( cgrp ) & ( 1 < < ss - > id ) ) )
return false ;
return cgroup_on_dfl ( cgrp ) & & ss - > implicit_on_dfl ;
}
2016-03-03 09:57:59 -05:00
/**
* cgroup_apply_control_enable - enable or show csses according to control
2016-03-03 09:57:59 -05:00
* @cgrp: root of the target subtree
2016-03-03 09:57:59 -05:00
*
2016-03-03 09:57:59 -05:00
* Walk @cgrp's subtree and create new csses or make the existing ones
2016-03-03 09:57:59 -05:00
* visible. A css is created invisible if it's being implicitly enabled
* through dependency. An invisible css is made visible when the userland
* explicitly enables it.
*
* Returns 0 on success, -errno on failure. On failure, csses which have
* been processed already aren't cleaned up. The caller is responsible for
* cleaning up with cgroup_apply_control_disble().
*/
static int cgroup_apply_control_enable ( struct cgroup * cgrp )
{
struct cgroup * dsct ;
2016-03-03 09:57:59 -05:00
struct cgroup_subsys_state * d_css ;
2016-03-03 09:57:59 -05:00
struct cgroup_subsys * ss ;
int ssid , ret ;
2016-03-03 09:57:59 -05:00
cgroup_for_each_live_descendant_pre ( dsct , d_css , cgrp ) {
2016-03-03 09:57:59 -05:00
for_each_subsys ( ss , ssid ) {
struct cgroup_subsys_state * css = cgroup_css ( dsct , ss ) ;
2016-03-03 09:58:00 -05:00
WARN_ON_ONCE ( css & & percpu_ref_is_dying ( & css - > refcnt ) ) ;
2016-03-03 09:57:59 -05:00
if ( ! ( cgroup_ss_mask ( dsct ) & ( 1 < < ss - > id ) ) )
continue ;
if ( ! css ) {
css = css_create ( dsct , ss ) ;
if ( IS_ERR ( css ) )
return PTR_ERR ( css ) ;
}
2016-03-08 11:51:26 -05:00
if ( css_visible ( css ) ) {
2016-03-03 09:58:01 -05:00
ret = css_populate_dir ( css ) ;
2016-03-03 09:57:59 -05:00
if ( ret )
return ret ;
}
}
}
return 0 ;
}
2016-03-03 09:57:59 -05:00
/**
* cgroup_apply_control_disable - kill or hide csses according to control
2016-03-03 09:57:59 -05:00
* @cgrp: root of the target subtree
2016-03-03 09:57:59 -05:00
*
2016-03-03 09:57:59 -05:00
* Walk @cgrp's subtree and kill and hide csses so that they match
2016-03-03 09:57:59 -05:00
* cgroup_ss_mask() and cgroup_visible_mask().
*
* A css is hidden when the userland requests it to be disabled while other
* subsystems are still depending on it. The css must not actively control
* resources and be in the vanilla state if it's made visible again later.
* Controllers which may be depended upon should provide ->css_reset() for
* this purpose.
*/
static void cgroup_apply_control_disable ( struct cgroup * cgrp )
{
struct cgroup * dsct ;
2016-03-03 09:57:59 -05:00
struct cgroup_subsys_state * d_css ;
2016-03-03 09:57:59 -05:00
struct cgroup_subsys * ss ;
int ssid ;
2016-03-03 09:57:59 -05:00
cgroup_for_each_live_descendant_post ( dsct , d_css , cgrp ) {
2016-03-03 09:57:59 -05:00
for_each_subsys ( ss , ssid ) {
struct cgroup_subsys_state * css = cgroup_css ( dsct , ss ) ;
2016-03-03 09:58:00 -05:00
WARN_ON_ONCE ( css & & percpu_ref_is_dying ( & css - > refcnt ) ) ;
2016-03-03 09:57:59 -05:00
if ( ! css )
continue ;
2016-03-03 09:58:01 -05:00
if ( css - > parent & &
! ( cgroup_ss_mask ( dsct ) & ( 1 < < ss - > id ) ) ) {
2016-03-03 09:57:59 -05:00
kill_css ( css ) ;
2016-03-08 11:51:26 -05:00
} else if ( ! css_visible ( css ) ) {
2016-03-03 09:58:01 -05:00
css_clear_dir ( css ) ;
2016-03-03 09:57:59 -05:00
if ( ss - > css_reset )
ss - > css_reset ( css ) ;
}
}
}
}
2016-03-03 09:58:00 -05:00
/**
* cgroup_apply_control - apply control mask updates to the subtree
* @cgrp: root of the target subtree
*
* subsystems can be enabled and disabled in a subtree using the following
* steps.
*
* 1. Call cgroup_save_control() to stash the current state.
* 2. Update ->subtree_control masks in the subtree as desired.
* 3. Call cgroup_apply_control() to apply the changes.
* 4. Optionally perform other related operations.
* 5. Call cgroup_finalize_control() to finish up.
*
* This function implements step 3 and propagates the mask changes
* throughout @cgrp's subtree, updates csses accordingly and perform
* process migrations.
*/
static int cgroup_apply_control ( struct cgroup * cgrp )
{
int ret ;
cgroup_propagate_control ( cgrp ) ;
ret = cgroup_apply_control_enable ( cgrp ) ;
if ( ret )
return ret ;
/*
* At this point, cgroup_e_css() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/
ret = cgroup_update_dfl_csses ( cgrp ) ;
if ( ret )
return ret ;
return 0 ;
}
/**
* cgroup_finalize_control - finalize control mask update
* @cgrp: root of the target subtree
* @ret: the result of the update
*
* Finalize control mask update. See cgroup_apply_control() for more info.
*/
static void cgroup_finalize_control ( struct cgroup * cgrp , int ret )
{
if ( ret ) {
cgroup_restore_control ( cgrp ) ;
cgroup_propagate_control ( cgrp ) ;
}
cgroup_apply_control_disable ( cgrp ) ;
}
2014-04-23 11:13:16 -04:00
/* change the enabled child controllers for a cgroup in the default hierarchy */
2014-05-13 12:16:21 -04:00
static ssize_t cgroup_subtree_control_write ( struct kernfs_open_file * of ,
char * buf , size_t nbytes ,
loff_t off )
2014-04-23 11:13:16 -04:00
{
2016-02-22 22:25:47 -05:00
u16 enable = 0 , disable = 0 ;
2014-05-13 12:19:22 -04:00
struct cgroup * cgrp , * child ;
2014-04-23 11:13:16 -04:00
struct cgroup_subsys * ss ;
2014-05-13 12:16:21 -04:00
char * tok ;
2014-04-23 11:13:16 -04:00
int ssid , ret ;
/*
2014-05-13 12:10:59 -04:00
* Parse input - space separated list of subsystem names prefixed
* with either + or -.
2014-04-23 11:13:16 -04:00
*/
2014-05-13 12:16:21 -04:00
buf = strstrip ( buf ) ;
while ( ( tok = strsep ( & buf , " " ) ) ) {
2014-05-13 12:10:59 -04:00
if ( tok [ 0 ] = = ' \0 ' )
continue ;
2016-02-23 10:00:50 -05:00
do_each_subsys_mask ( ss , ssid , ~ cgrp_dfl_inhibit_ss_mask ) {
2015-09-18 11:56:28 -04:00
if ( ! cgroup_ssid_enabled ( ssid ) | |
strcmp ( tok + 1 , ss - > name ) )
2014-04-23 11:13:16 -04:00
continue ;
if ( * tok = = ' + ' ) {
2014-05-13 12:11:00 -04:00
enable | = 1 < < ssid ;
disable & = ~ ( 1 < < ssid ) ;
2014-04-23 11:13:16 -04:00
} else if ( * tok = = ' - ' ) {
2014-05-13 12:11:00 -04:00
disable | = 1 < < ssid ;
enable & = ~ ( 1 < < ssid ) ;
2014-04-23 11:13:16 -04:00
} else {
return - EINVAL ;
}
break ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2014-04-23 11:13:16 -04:00
if ( ssid = = CGROUP_SUBSYS_COUNT )
return - EINVAL ;
}
2016-03-03 09:58:00 -05:00
cgrp = cgroup_kn_lock_live ( of - > kn , true ) ;
2014-05-13 12:19:22 -04:00
if ( ! cgrp )
return - ENODEV ;
2014-04-23 11:13:16 -04:00
for_each_subsys ( ss , ssid ) {
if ( enable & ( 1 < < ssid ) ) {
2014-07-08 18:02:56 -04:00
if ( cgrp - > subtree_control & ( 1 < < ssid ) ) {
2014-04-23 11:13:16 -04:00
enable & = ~ ( 1 < < ssid ) ;
continue ;
}
2016-03-03 09:57:58 -05:00
if ( ! ( cgroup_control ( cgrp ) & ( 1 < < ssid ) ) ) {
2014-07-08 18:02:56 -04:00
ret = - ENOENT ;
goto out_unlock ;
}
2014-04-23 11:13:16 -04:00
} else if ( disable & ( 1 < < ssid ) ) {
2014-07-08 18:02:56 -04:00
if ( ! ( cgrp - > subtree_control & ( 1 < < ssid ) ) ) {
2014-04-23 11:13:16 -04:00
disable & = ~ ( 1 < < ssid ) ;
continue ;
}
/* a child has it enabled? */
cgroup_for_each_live_child ( child , cgrp ) {
2014-07-08 18:02:56 -04:00
if ( child - > subtree_control & ( 1 < < ssid ) ) {
2014-04-23 11:13:16 -04:00
ret = - EBUSY ;
2014-05-13 12:19:22 -04:00
goto out_unlock ;
2014-04-23 11:13:16 -04:00
}
}
}
}
if ( ! enable & & ! disable ) {
ret = 0 ;
2014-05-13 12:19:22 -04:00
goto out_unlock ;
2014-04-23 11:13:16 -04:00
}
/*
2014-07-08 18:02:56 -04:00
* Except for the root, subtree_control must be zero for a cgroup
2014-04-23 11:13:16 -04:00
* with tasks so that child cgroups don't compete against tasks.
*/
2016-09-23 16:55:49 -04:00
if ( enable & & cgroup_parent ( cgrp ) ) {
struct cgrp_cset_link * link ;
/*
* Because namespaces pin csets too, @cgrp->cset_links
* might not be empty even when @cgrp is empty. Walk and
* verify each cset.
*/
spin_lock_irq ( & css_set_lock ) ;
ret = 0 ;
list_for_each_entry ( link , & cgrp - > cset_links , cset_link ) {
if ( css_set_populated ( link - > cset ) ) {
ret = - EBUSY ;
break ;
}
}
spin_unlock_irq ( & css_set_lock ) ;
if ( ret )
goto out_unlock ;
2014-04-23 11:13:16 -04:00
}
2016-03-03 09:57:59 -05:00
/* save and update control masks and prepare csses */
cgroup_save_control ( cgrp ) ;
2014-07-08 18:02:56 -04:00
2016-03-03 09:57:59 -05:00
cgrp - > subtree_control | = enable ;
cgrp - > subtree_control & = ~ disable ;
2014-07-08 18:02:57 -04:00
2016-03-03 09:58:00 -05:00
ret = cgroup_apply_control ( cgrp ) ;
2014-11-18 02:49:50 -05:00
2016-03-03 09:58:00 -05:00
cgroup_finalize_control ( cgrp , ret ) ;
2014-04-23 11:13:16 -04:00
kernfs_activate ( cgrp - > kn ) ;
ret = 0 ;
out_unlock :
2014-05-13 12:19:22 -04:00
cgroup_kn_unlock ( of - > kn ) ;
2014-05-13 12:16:21 -04:00
return ret ? : nbytes ;
2014-04-23 11:13:16 -04:00
}
2015-09-18 17:54:22 -04:00
static int cgroup_events_show ( struct seq_file * seq , void * v )
2014-04-25 18:28:02 -04:00
{
2015-09-18 17:54:22 -04:00
seq_printf ( seq , " populated %d \n " ,
2015-10-15 16:41:50 -04:00
cgroup_is_populated ( seq_css ( seq ) - > cgroup ) ) ;
2014-04-25 18:28:02 -04:00
return 0 ;
}
2014-02-11 11:52:49 -05:00
static ssize_t cgroup_file_write ( struct kernfs_open_file * of , char * buf ,
size_t nbytes , loff_t off )
2007-10-18 23:39:30 -07:00
{
2014-02-11 11:52:49 -05:00
struct cgroup * cgrp = of - > kn - > parent - > priv ;
struct cftype * cft = of - > kn - > priv ;
struct cgroup_subsys_state * css ;
2013-12-05 12:28:03 -05:00
int ret ;
2007-10-18 23:39:30 -07:00
2014-05-13 12:16:21 -04:00
if ( cft - > write )
return cft - > write ( of , buf , nbytes , off ) ;
2014-02-11 11:52:49 -05:00
/*
* kernfs guarantees that a file isn't deleted with operations in
* flight, which means that the matching css is and stays alive and
* doesn't need to be pinned. The RCU locking is not necessary
* either. It's just for the convenience of using cgroup_css().
*/
rcu_read_lock ( ) ;
css = cgroup_css ( cgrp , cft - > ss ) ;
rcu_read_unlock ( ) ;
2007-10-18 23:39:30 -07:00
2014-05-13 12:16:21 -04:00
if ( cft - > write_u64 ) {
2013-12-05 12:28:03 -05:00
unsigned long long v ;
ret = kstrtoull ( buf , 0 , & v ) ;
if ( ! ret )
ret = cft - > write_u64 ( css , cft , v ) ;
} else if ( cft - > write_s64 ) {
long long v ;
ret = kstrtoll ( buf , 0 , & v ) ;
if ( ! ret )
ret = cft - > write_s64 ( css , cft , v ) ;
} else {
ret = - EINVAL ;
}
2014-02-11 11:52:49 -05:00
2013-12-05 12:28:03 -05:00
return ret ? : nbytes ;
2007-10-18 23:39:30 -07:00
}
2013-12-05 12:28:04 -05:00
static void * cgroup_seqfile_start ( struct seq_file * seq , loff_t * ppos )
2008-04-29 01:00:01 -07:00
{
2014-02-11 11:52:49 -05:00
return seq_cft ( seq ) - > seq_start ( seq , ppos ) ;
2013-12-05 12:28:04 -05:00
}
static void * cgroup_seqfile_next ( struct seq_file * seq , void * v , loff_t * ppos )
{
2014-02-11 11:52:49 -05:00
return seq_cft ( seq ) - > seq_next ( seq , v , ppos ) ;
2013-12-05 12:28:04 -05:00
}
static void cgroup_seqfile_stop ( struct seq_file * seq , void * v )
{
2014-02-11 11:52:49 -05:00
seq_cft ( seq ) - > seq_stop ( seq , v ) ;
2008-04-29 01:00:01 -07:00
}
static int cgroup_seqfile_show ( struct seq_file * m , void * arg )
{
2013-12-05 12:28:04 -05:00
struct cftype * cft = seq_cft ( m ) ;
struct cgroup_subsys_state * css = seq_css ( m ) ;
2013-07-31 17:36:25 +08:00
2013-12-05 12:28:04 -05:00
if ( cft - > seq_show )
return cft - > seq_show ( m , arg ) ;
2008-04-29 01:00:01 -07:00
2013-12-05 12:28:04 -05:00
if ( cft - > read_u64 )
seq_printf ( m , " %llu \n " , cft - > read_u64 ( css , cft ) ) ;
else if ( cft - > read_s64 )
seq_printf ( m , " %lld \n " , cft - > read_s64 ( css , cft ) ) ;
else
return - EINVAL ;
return 0 ;
2008-04-29 01:00:01 -07:00
}
2014-02-11 11:52:49 -05:00
static struct kernfs_ops cgroup_kf_single_ops = {
. atomic_write_len = PAGE_SIZE ,
. write = cgroup_file_write ,
. seq_show = cgroup_seqfile_show ,
2008-04-29 01:00:01 -07:00
} ;
2014-02-11 11:52:49 -05:00
static struct kernfs_ops cgroup_kf_ops = {
. atomic_write_len = PAGE_SIZE ,
. write = cgroup_file_write ,
. seq_start = cgroup_seqfile_start ,
. seq_next = cgroup_seqfile_next ,
. seq_stop = cgroup_seqfile_stop ,
. seq_show = cgroup_seqfile_show ,
} ;
2007-10-18 23:39:30 -07:00
/*
* cgroup_rename - Only allow simple rename of directories in place.
*/
2014-02-11 11:52:49 -05:00
static int cgroup_rename ( struct kernfs_node * kn , struct kernfs_node * new_parent ,
const char * new_name_str )
2007-10-18 23:39:30 -07:00
{
2014-02-11 11:52:49 -05:00
struct cgroup * cgrp = kn - > priv ;
2013-03-01 15:01:56 +08:00
int ret ;
2014-02-11 11:52:49 -05:00
if ( kernfs_type ( kn ) ! = KERNFS_DIR )
2007-10-18 23:39:30 -07:00
return - ENOTDIR ;
2014-02-11 11:52:49 -05:00
if ( kn - > parent ! = new_parent )
2007-10-18 23:39:30 -07:00
return - EIO ;
2013-03-01 15:01:56 +08:00
2013-06-14 11:18:22 -07:00
/*
* This isn't a proper migration and its usefulness is very
2014-07-09 10:08:08 -04:00
* limited. Disallow on the default hierarchy.
2013-06-14 11:18:22 -07:00
*/
2014-07-09 10:08:08 -04:00
if ( cgroup_on_dfl ( cgrp ) )
2013-06-14 11:18:22 -07:00
return - EPERM ;
2014-03-20 11:10:15 -04:00
/*
2014-05-13 12:19:23 -04:00
* We're gonna grab cgroup_mutex which nests outside kernfs
2014-03-20 11:10:15 -04:00
* active_ref. kernfs_rename() doesn't require active_ref
2014-05-13 12:19:23 -04:00
* protection. Break them before grabbing cgroup_mutex.
2014-03-20 11:10:15 -04:00
*/
kernfs_break_active_protection ( new_parent ) ;
kernfs_break_active_protection ( kn ) ;
2013-03-01 15:01:56 +08:00
2014-02-11 11:52:49 -05:00
mutex_lock ( & cgroup_mutex ) ;
2013-03-01 15:01:56 +08:00
2014-02-11 11:52:49 -05:00
ret = kernfs_rename ( kn , new_parent , new_name_str ) ;
2016-08-10 11:23:44 -04:00
if ( ! ret )
trace_cgroup_rename ( cgrp ) ;
2013-03-01 15:01:56 +08:00
2014-02-11 11:52:49 -05:00
mutex_unlock ( & cgroup_mutex ) ;
2007-10-18 23:39:30 -07:00
2014-03-20 11:10:15 -04:00
kernfs_unbreak_active_protection ( kn ) ;
kernfs_unbreak_active_protection ( new_parent ) ;
2014-02-11 11:52:49 -05:00
return ret ;
2009-04-02 16:57:29 -07:00
}
2014-04-07 16:44:47 -04:00
/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid ( struct kernfs_node * kn )
{
struct iattr iattr = { . ia_valid = ATTR_UID | ATTR_GID ,
. ia_uid = current_fsuid ( ) ,
. ia_gid = current_fsgid ( ) , } ;
if ( uid_eq ( iattr . ia_uid , GLOBAL_ROOT_UID ) & &
gid_eq ( iattr . ia_gid , GLOBAL_ROOT_GID ) )
return 0 ;
return kernfs_setattr ( kn , & iattr ) ;
}
2015-09-18 17:54:23 -04:00
static int cgroup_add_file ( struct cgroup_subsys_state * css , struct cgroup * cgrp ,
struct cftype * cft )
2007-10-18 23:39:30 -07:00
{
2014-02-11 11:52:48 -05:00
char name [ CGROUP_FILE_NAME_MAX ] ;
2014-02-11 11:52:49 -05:00
struct kernfs_node * kn ;
struct lock_class_key * key = NULL ;
2014-04-07 16:44:47 -04:00
int ret ;
2012-04-01 12:09:55 -07:00
2014-02-11 11:52:49 -05:00
# ifdef CONFIG_DEBUG_LOCK_ALLOC
key = & cft - > lockdep_key ;
# endif
kn = __kernfs_create_file ( cgrp - > kn , cgroup_file_name ( cgrp , cft , name ) ,
cgroup_file_mode ( cft ) , 0 , cft - > kf_ops , cft ,
2015-02-13 14:36:31 -08:00
NULL , key ) ;
2014-04-07 16:44:47 -04:00
if ( IS_ERR ( kn ) )
return PTR_ERR ( kn ) ;
ret = cgroup_kn_set_ugid ( kn ) ;
2014-04-23 11:13:16 -04:00
if ( ret ) {
2014-04-07 16:44:47 -04:00
kernfs_remove ( kn ) ;
2014-04-23 11:13:16 -04:00
return ret ;
}
2015-09-18 17:54:23 -04:00
if ( cft - > file_offset ) {
struct cgroup_file * cfile = ( void * ) css + cft - > file_offset ;
2015-11-05 00:12:24 -05:00
spin_lock_irq ( & cgroup_file_kn_lock ) ;
2015-09-18 17:54:23 -04:00
cfile - > kn = kn ;
2015-11-05 00:12:24 -05:00
spin_unlock_irq ( & cgroup_file_kn_lock ) ;
2015-09-18 17:54:23 -04:00
}
2014-04-23 11:13:16 -04:00
return 0 ;
2007-10-18 23:39:30 -07:00
}
2013-06-28 16:24:10 -07:00
/**
* cgroup_addrm_files - add or remove files to a cgroup directory
2015-09-18 17:54:23 -04:00
* @css: the target css
* @cgrp: the target cgroup (usually css->cgroup)
2013-06-28 16:24:10 -07:00
* @cfts: array of cftypes to be added
* @is_add: whether to add or remove
*
* Depending on @is_add, add or remove files defined by @cfts on @cgrp.
2015-09-18 17:54:23 -04:00
* For removals, this function never fails.
2013-06-28 16:24:10 -07:00
*/
2015-09-18 17:54:23 -04:00
static int cgroup_addrm_files ( struct cgroup_subsys_state * css ,
struct cgroup * cgrp , struct cftype cfts [ ] ,
2013-08-08 20:11:23 -04:00
bool is_add )
2007-10-18 23:39:30 -07:00
{
2015-09-18 17:54:23 -04:00
struct cftype * cft , * cft_end = NULL ;
2016-02-22 22:25:45 -05:00
int ret = 0 ;
2013-06-28 16:24:10 -07:00
2014-05-13 12:19:23 -04:00
lockdep_assert_held ( & cgroup_mutex ) ;
2012-04-01 12:09:55 -07:00
2015-09-18 17:54:23 -04:00
restart :
for ( cft = cfts ; cft ! = cft_end & & cft - > name [ 0 ] ! = ' \0 ' ; cft + + ) {
2012-12-06 14:38:57 +08:00
/* does cft->flags tell us to skip this file on @cgrp? */
2014-07-15 11:05:10 -04:00
if ( ( cft - > flags & __CFTYPE_ONLY_ON_DFL ) & & ! cgroup_on_dfl ( cgrp ) )
2014-03-19 10:23:55 -04:00
continue ;
2014-07-15 11:05:10 -04:00
if ( ( cft - > flags & __CFTYPE_NOT_ON_DFL ) & & cgroup_on_dfl ( cgrp ) )
2013-04-14 20:15:26 -07:00
continue ;
2014-05-16 13:22:48 -04:00
if ( ( cft - > flags & CFTYPE_NOT_ON_ROOT ) & & ! cgroup_parent ( cgrp ) )
2012-12-06 14:38:57 +08:00
continue ;
2014-05-16 13:22:48 -04:00
if ( ( cft - > flags & CFTYPE_ONLY_ON_ROOT ) & & cgroup_parent ( cgrp ) )
2012-12-06 14:38:57 +08:00
continue ;
2013-01-21 18:18:33 +08:00
if ( is_add ) {
2015-09-18 17:54:23 -04:00
ret = cgroup_add_file ( css , cgrp , cft ) ;
2013-06-28 16:24:10 -07:00
if ( ret ) {
2014-04-25 18:28:03 -04:00
pr_warn ( " %s: failed to add %s, err=%d \n " ,
__func__ , cft - > name , ret ) ;
2015-09-18 17:54:23 -04:00
cft_end = cft ;
is_add = false ;
goto restart ;
2013-06-28 16:24:10 -07:00
}
2013-01-21 18:18:33 +08:00
} else {
cgroup_rm_file ( cgrp , cft ) ;
2012-04-01 12:09:55 -07:00
}
2007-10-18 23:39:30 -07:00
}
2016-02-22 22:25:45 -05:00
return ret ;
2007-10-18 23:39:30 -07:00
}
2014-02-12 09:29:49 -05:00
static int cgroup_apply_cftypes ( struct cftype * cfts , bool is_add )
2012-04-01 12:09:55 -07:00
{
LIST_HEAD ( pending ) ;
2013-08-08 20:11:23 -04:00
struct cgroup_subsys * ss = cfts [ 0 ] . ss ;
2014-03-19 10:23:54 -04:00
struct cgroup * root = & ss - > root - > cgrp ;
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state * css ;
2013-06-28 16:24:11 -07:00
int ret = 0 ;
2012-04-01 12:09:55 -07:00
2014-05-13 12:19:23 -04:00
lockdep_assert_held ( & cgroup_mutex ) ;
2013-06-18 18:48:37 +08:00
/* add/rm files for all cgroups created before */
2013-08-26 18:40:56 -04:00
css_for_each_descendant_pre ( css , cgroup_css ( root , ss ) ) {
2013-08-08 20:11:25 -04:00
struct cgroup * cgrp = css - > cgroup ;
2016-03-03 09:57:58 -05:00
if ( ! ( css - > flags & CSS_VISIBLE ) )
2013-06-18 18:48:37 +08:00
continue ;
2015-09-18 17:54:23 -04:00
ret = cgroup_addrm_files ( css , cgrp , cfts , is_add ) ;
2013-06-28 16:24:11 -07:00
if ( ret )
break ;
2012-04-01 12:09:55 -07:00
}
2014-02-12 09:29:49 -05:00
if ( is_add & & ! ret )
kernfs_activate ( root - > kn ) ;
2013-06-28 16:24:11 -07:00
return ret ;
2012-04-01 12:09:55 -07:00
}
2014-02-11 11:52:48 -05:00
static void cgroup_exit_cftypes ( struct cftype * cfts )
{
struct cftype * cft ;
2014-02-11 11:52:49 -05:00
for ( cft = cfts ; cft - > name [ 0 ] ! = ' \0 ' ; cft + + ) {
/* free copy for custom atomic_write_len, see init_cftypes() */
if ( cft - > max_write_len & & cft - > max_write_len ! = PAGE_SIZE )
kfree ( cft - > kf_ops ) ;
cft - > kf_ops = NULL ;
2014-02-11 11:52:48 -05:00
cft - > ss = NULL ;
2014-07-15 11:05:10 -04:00
/* revert flags set by cgroup core while adding @cfts */
2014-07-15 11:05:10 -04:00
cft - > flags & = ~ ( __CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL ) ;
2014-02-11 11:52:49 -05:00
}
2014-02-11 11:52:48 -05:00
}
2014-02-11 11:52:49 -05:00
static int cgroup_init_cftypes ( struct cgroup_subsys * ss , struct cftype * cfts )
2014-02-11 11:52:48 -05:00
{
struct cftype * cft ;
2014-02-11 11:52:49 -05:00
for ( cft = cfts ; cft - > name [ 0 ] ! = ' \0 ' ; cft + + ) {
struct kernfs_ops * kf_ops ;
2014-02-12 09:29:48 -05:00
WARN_ON ( cft - > ss | | cft - > kf_ops ) ;
2014-02-11 11:52:49 -05:00
if ( cft - > seq_start )
kf_ops = & cgroup_kf_ops ;
else
kf_ops = & cgroup_kf_single_ops ;
/*
* Ugh... if @cft wants a custom max_write_len, we need to
* make a copy of kf_ops to set its atomic_write_len.
*/
if ( cft - > max_write_len & & cft - > max_write_len ! = PAGE_SIZE ) {
kf_ops = kmemdup ( kf_ops , sizeof ( * kf_ops ) , GFP_KERNEL ) ;
if ( ! kf_ops ) {
cgroup_exit_cftypes ( cfts ) ;
return - ENOMEM ;
}
kf_ops - > atomic_write_len = cft - > max_write_len ;
}
cft - > kf_ops = kf_ops ;
2014-02-11 11:52:48 -05:00
cft - > ss = ss ;
2014-02-11 11:52:49 -05:00
}
return 0 ;
2014-02-11 11:52:48 -05:00
}
2014-02-12 09:29:49 -05:00
static int cgroup_rm_cftypes_locked ( struct cftype * cfts )
{
2014-05-13 12:19:23 -04:00
lockdep_assert_held ( & cgroup_mutex ) ;
2014-02-12 09:29:49 -05:00
if ( ! cfts | | ! cfts [ 0 ] . ss )
return - ENOENT ;
list_del ( & cfts - > node ) ;
cgroup_apply_cftypes ( cfts , false ) ;
cgroup_exit_cftypes ( cfts ) ;
return 0 ;
}
2014-02-12 09:29:48 -05:00
/**
* cgroup_rm_cftypes - remove an array of cftypes from a subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Unregister @cfts. Files described by @cfts are removed from all
* existing cgroups and all future cgroups won't have them either. This
* function can be called anytime whether @cfts' subsys is attached or not.
*
* Returns 0 on successful unregistration, -ENOENT if @cfts is not
* registered.
*/
int cgroup_rm_cftypes ( struct cftype * cfts )
{
2014-02-12 09:29:49 -05:00
int ret ;
2014-02-12 09:29:48 -05:00
2014-05-13 12:19:23 -04:00
mutex_lock ( & cgroup_mutex ) ;
2014-02-12 09:29:49 -05:00
ret = cgroup_rm_cftypes_locked ( cfts ) ;
2014-05-13 12:19:23 -04:00
mutex_unlock ( & cgroup_mutex ) ;
2013-06-28 16:24:11 -07:00
return ret ;
2012-04-01 12:09:55 -07:00
}
/**
* cgroup_add_cftypes - add an array of cftypes to a subsystem
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Register @cfts to @ss. Files described by @cfts are created for all
* existing cgroups to which @ss is attached and all future cgroups will
* have them too. This function can be called anytime whether @ss is
* attached or not.
*
* Returns 0 on successful registration, -errno on failure. Note that this
* function currently returns 0 as long as @cfts registration is successful
* even if some file creation attempts on existing cgroups fail.
*/
2014-07-15 11:05:09 -04:00
static int cgroup_add_cftypes ( struct cgroup_subsys * ss , struct cftype * cfts )
2012-04-01 12:09:55 -07:00
{
2013-06-28 16:24:11 -07:00
int ret ;
2012-04-01 12:09:55 -07:00
2015-09-18 11:56:28 -04:00
if ( ! cgroup_ssid_enabled ( ss - > id ) )
2014-06-05 17:16:30 +08:00
return 0 ;
2014-02-17 10:41:50 +08:00
if ( ! cfts | | cfts [ 0 ] . name [ 0 ] = = ' \0 ' )
return 0 ;
2012-04-01 12:09:55 -07:00
2014-02-11 11:52:49 -05:00
ret = cgroup_init_cftypes ( ss , cfts ) ;
2013-06-28 16:24:11 -07:00
if ( ret )
2014-02-11 11:52:49 -05:00
return ret ;
2013-08-08 20:11:23 -04:00
2014-05-13 12:19:23 -04:00
mutex_lock ( & cgroup_mutex ) ;
2014-02-12 09:29:49 -05:00
2014-02-12 09:29:48 -05:00
list_add_tail ( & cfts - > node , & ss - > cfts ) ;
2014-02-12 09:29:49 -05:00
ret = cgroup_apply_cftypes ( cfts , true ) ;
2013-06-28 16:24:11 -07:00
if ( ret )
2014-02-12 09:29:49 -05:00
cgroup_rm_cftypes_locked ( cfts ) ;
2014-05-13 12:19:23 -04:00
mutex_unlock ( & cgroup_mutex ) ;
2013-06-28 16:24:11 -07:00
return ret ;
2012-04-01 12:09:55 -07:00
}
2012-04-01 12:09:56 -07:00
2014-07-15 11:05:10 -04:00
/**
* cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Similar to cgroup_add_cftypes() but the added files are only used for
* the default hierarchy.
*/
int cgroup_add_dfl_cftypes ( struct cgroup_subsys * ss , struct cftype * cfts )
{
struct cftype * cft ;
for ( cft = cfts ; cft & & cft - > name [ 0 ] ! = ' \0 ' ; cft + + )
2014-07-15 11:05:10 -04:00
cft - > flags | = __CFTYPE_ONLY_ON_DFL ;
2014-07-15 11:05:10 -04:00
return cgroup_add_cftypes ( ss , cfts ) ;
}
/**
* cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
* @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
* Similar to cgroup_add_cftypes() but the added files are only used for
* the legacy hierarchies.
*/
2014-07-15 11:05:09 -04:00
int cgroup_add_legacy_cftypes ( struct cgroup_subsys * ss , struct cftype * cfts )
{
2014-07-15 11:05:10 -04:00
struct cftype * cft ;
2015-10-15 17:00:43 -04:00
for ( cft = cfts ; cft & & cft - > name [ 0 ] ! = ' \0 ' ; cft + + )
cft - > flags | = __CFTYPE_NOT_ON_DFL ;
2014-07-15 11:05:09 -04:00
return cgroup_add_cftypes ( ss , cfts ) ;
}
2015-11-05 00:12:24 -05:00
/**
* cgroup_file_notify - generate a file modified event for a cgroup_file
* @cfile: target cgroup_file
*
* @cfile must have been obtained by setting cftype->file_offset.
*/
void cgroup_file_notify ( struct cgroup_file * cfile )
{
unsigned long flags ;
spin_lock_irqsave ( & cgroup_file_kn_lock , flags ) ;
if ( cfile - > kn )
kernfs_notify ( cfile - > kn ) ;
spin_unlock_irqrestore ( & cgroup_file_kn_lock , flags ) ;
}
2008-02-23 15:24:09 -08:00
/**
* cgroup_task_count - count the number of tasks in a cgroup.
* @cgrp: the cgroup in question
*
2016-09-23 16:55:49 -04:00
* Return the number of tasks in the cgroup. The returned number can be
* higher than the actual number of tasks due to css_set references from
* namespace roots and temporary usages.
2008-02-23 15:24:09 -08:00
*/
2014-02-13 06:58:39 -05:00
static int cgroup_task_count ( const struct cgroup * cgrp )
2007-10-18 23:39:32 -07:00
{
int count = 0 ;
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link ;
2007-10-18 23:39:32 -07:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2013-06-12 21:04:50 -07:00
list_for_each_entry ( link , & cgrp - > cset_links , cset_link )
count + = atomic_read ( & link - > cset - > refcount ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:32 -07:00
return count ;
}
2013-05-24 10:55:38 +09:00
/**
2013-08-08 20:11:25 -04:00
* css_next_child - find the next child of a given css
2014-05-16 13:22:51 -04:00
* @pos: the current position (%NULL to initiate traversal)
* @parent: css whose children to walk
2013-05-24 10:55:38 +09:00
*
2014-05-16 13:22:51 -04:00
* This function returns the next child of @parent and should be called
2013-12-06 15:11:55 -05:00
* under either cgroup_mutex or RCU read lock. The only requirement is
2014-05-16 13:22:51 -04:00
* that @parent and @pos are accessible. The next sibling is guaranteed to
* be returned regardless of their states.
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
2013-05-24 10:55:38 +09:00
*/
2014-05-16 13:22:51 -04:00
struct cgroup_subsys_state * css_next_child ( struct cgroup_subsys_state * pos ,
struct cgroup_subsys_state * parent )
2013-05-24 10:55:38 +09:00
{
2014-05-16 13:22:51 -04:00
struct cgroup_subsys_state * next ;
2013-05-24 10:55:38 +09:00
2014-05-13 12:19:23 -04:00
cgroup_assert_mutex_or_rcu_locked ( ) ;
2013-05-24 10:55:38 +09:00
/*
2014-05-16 13:22:49 -04:00
* @pos could already have been unlinked from the sibling list.
* Once a cgroup is removed, its ->sibling.next is no longer
* updated when its next sibling changes. CSS_RELEASED is set when
* @pos is taken off list, at which time its next pointer is valid,
* and, as releases are serialized, the one pointed to by the next
* pointer is guaranteed to not have started release yet. This
* implies that if we observe !CSS_RELEASED on @pos in this RCU
* critical section, the one pointed to by its next pointer is
* guaranteed to not have finished its RCU grace period even if we
* have dropped rcu_read_lock() inbetween iterations.
2013-08-08 20:11:24 -04:00
*
2014-05-16 13:22:49 -04:00
* If @pos has CSS_RELEASED set, its next pointer can't be
* dereferenced; however, as each css is given a monotonically
* increasing unique serial number and always appended to the
* sibling list, the next one can be found by walking the parent's
* children until the first css with higher serial number than
* @pos's. While this path can be slower, it happens iff iteration
* races against release and the race window is very small.
2013-05-24 10:55:38 +09:00
*/
2013-08-08 20:11:24 -04:00
if ( ! pos ) {
2014-05-16 13:22:51 -04:00
next = list_entry_rcu ( parent - > children . next , struct cgroup_subsys_state , sibling ) ;
} else if ( likely ( ! ( pos - > flags & CSS_RELEASED ) ) ) {
next = list_entry_rcu ( pos - > sibling . next , struct cgroup_subsys_state , sibling ) ;
2013-08-08 20:11:24 -04:00
} else {
2014-05-16 13:22:51 -04:00
list_for_each_entry_rcu ( next , & parent - > children , sibling )
2013-08-08 20:11:24 -04:00
if ( next - > serial_nr > pos - > serial_nr )
break ;
2013-05-24 10:55:38 +09:00
}
2014-04-23 11:13:15 -04:00
/*
* @next, if not pointing to the head, can be dereferenced and is
2014-05-16 13:22:51 -04:00
* the next sibling.
2014-04-23 11:13:15 -04:00
*/
2014-05-16 13:22:51 -04:00
if ( & next - > sibling ! = & parent - > children )
return next ;
2014-04-23 11:13:15 -04:00
return NULL ;
2013-05-24 10:55:38 +09:00
}
2012-11-09 09:12:29 -08:00
/**
2013-08-08 20:11:25 -04:00
* css_next_descendant_pre - find the next descendant for pre-order walk
2012-11-09 09:12:29 -08:00
* @pos: the current position (%NULL to initiate traversal)
2013-08-08 20:11:25 -04:00
* @root: css whose descendants to walk
2012-11-09 09:12:29 -08:00
*
2013-08-08 20:11:25 -04:00
* To be used by css_for_each_descendant_pre(). Find the next descendant
2013-08-08 20:11:27 -04:00
* to visit for pre-order traversal of @root's descendants. @root is
* included in the iteration and the first node to be visited.
2013-05-24 10:55:38 +09:00
*
2013-12-06 15:11:55 -05:00
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
* section. This function will return the correct next descendant as long
* as both @pos and @root are accessible and @pos is a descendant of @root.
2014-05-16 13:22:51 -04:00
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
2012-11-09 09:12:29 -08:00
*/
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state *
css_next_descendant_pre ( struct cgroup_subsys_state * pos ,
struct cgroup_subsys_state * root )
2012-11-09 09:12:29 -08:00
{
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state * next ;
2012-11-09 09:12:29 -08:00
2014-05-13 12:19:23 -04:00
cgroup_assert_mutex_or_rcu_locked ( ) ;
2012-11-09 09:12:29 -08:00
2013-08-08 20:11:27 -04:00
/* if first iteration, visit @root */
2013-05-24 10:50:24 +09:00
if ( ! pos )
2013-08-08 20:11:27 -04:00
return root ;
2012-11-09 09:12:29 -08:00
/* visit the first child if exists */
2013-08-08 20:11:25 -04:00
next = css_next_child ( NULL , pos ) ;
2012-11-09 09:12:29 -08:00
if ( next )
return next ;
/* no child, visit my or the closest ancestor's next sibling */
2013-08-08 20:11:25 -04:00
while ( pos ! = root ) {
2014-05-16 13:22:48 -04:00
next = css_next_child ( pos , pos - > parent ) ;
2013-05-24 10:55:38 +09:00
if ( next )
2012-11-09 09:12:29 -08:00
return next ;
2014-05-16 13:22:48 -04:00
pos = pos - > parent ;
2013-05-24 10:50:24 +09:00
}
2012-11-09 09:12:29 -08:00
return NULL ;
}
2013-01-07 08:49:33 -08:00
/**
2013-08-08 20:11:25 -04:00
* css_rightmost_descendant - return the rightmost descendant of a css
* @pos: css of interest
2013-01-07 08:49:33 -08:00
*
2013-08-08 20:11:25 -04:00
* Return the rightmost descendant of @pos. If there's no descendant, @pos
* is returned. This can be used during pre-order traversal to skip
2013-01-07 08:49:33 -08:00
* subtree of @pos.
2013-05-24 10:55:38 +09:00
*
2013-12-06 15:11:55 -05:00
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
* section. This function will return the correct rightmost descendant as
* long as @pos is accessible.
2013-01-07 08:49:33 -08:00
*/
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state *
css_rightmost_descendant ( struct cgroup_subsys_state * pos )
2013-01-07 08:49:33 -08:00
{
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state * last , * tmp ;
2013-01-07 08:49:33 -08:00
2014-05-13 12:19:23 -04:00
cgroup_assert_mutex_or_rcu_locked ( ) ;
2013-01-07 08:49:33 -08:00
do {
last = pos ;
/* ->prev isn't RCU safe, walk ->next till the end */
pos = NULL ;
2013-08-08 20:11:25 -04:00
css_for_each_child ( tmp , last )
2013-01-07 08:49:33 -08:00
pos = tmp ;
} while ( pos ) ;
return last ;
}
2013-08-08 20:11:25 -04:00
static struct cgroup_subsys_state *
css_leftmost_descendant ( struct cgroup_subsys_state * pos )
2012-11-09 09:12:29 -08:00
{
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state * last ;
2012-11-09 09:12:29 -08:00
do {
last = pos ;
2013-08-08 20:11:25 -04:00
pos = css_next_child ( NULL , pos ) ;
2012-11-09 09:12:29 -08:00
} while ( pos ) ;
return last ;
}
/**
2013-08-08 20:11:25 -04:00
* css_next_descendant_post - find the next descendant for post-order walk
2012-11-09 09:12:29 -08:00
* @pos: the current position (%NULL to initiate traversal)
2013-08-08 20:11:25 -04:00
* @root: css whose descendants to walk
2012-11-09 09:12:29 -08:00
*
2013-08-08 20:11:25 -04:00
* To be used by css_for_each_descendant_post(). Find the next descendant
2013-08-08 20:11:27 -04:00
* to visit for post-order traversal of @root's descendants. @root is
* included in the iteration and the last node to be visited.
2013-05-24 10:55:38 +09:00
*
2013-12-06 15:11:55 -05:00
* While this function requires cgroup_mutex or RCU read locking, it
* doesn't require the whole traversal to be contained in a single critical
* section. This function will return the correct next descendant as long
* as both @pos and @cgroup are accessible and @pos is a descendant of
* @cgroup.
2014-05-16 13:22:51 -04:00
*
* If a subsystem synchronizes ->css_online() and the start of iteration, a
* css which finished ->css_online() is guaranteed to be visible in the
* future iterations and will stay visible until the last reference is put.
* A css which hasn't finished ->css_online() or already finished
* ->css_offline() may show up during traversal. It's each subsystem's
* responsibility to synchronize against on/offlining.
2012-11-09 09:12:29 -08:00
*/
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state *
css_next_descendant_post ( struct cgroup_subsys_state * pos ,
struct cgroup_subsys_state * root )
2012-11-09 09:12:29 -08:00
{
2013-08-08 20:11:25 -04:00
struct cgroup_subsys_state * next ;
2012-11-09 09:12:29 -08:00
2014-05-13 12:19:23 -04:00
cgroup_assert_mutex_or_rcu_locked ( ) ;
2012-11-09 09:12:29 -08:00
2013-09-06 15:31:08 -04:00
/* if first iteration, visit leftmost descendant which may be @root */
if ( ! pos )
return css_leftmost_descendant ( root ) ;
2012-11-09 09:12:29 -08:00
2013-08-08 20:11:27 -04:00
/* if we visited @root, we're done */
if ( pos = = root )
return NULL ;
2012-11-09 09:12:29 -08:00
/* if there's an unvisited sibling, visit its leftmost descendant */
2014-05-16 13:22:48 -04:00
next = css_next_child ( pos , pos - > parent ) ;
2013-05-24 10:55:38 +09:00
if ( next )
2013-08-08 20:11:25 -04:00
return css_leftmost_descendant ( next ) ;
2012-11-09 09:12:29 -08:00
/* no sibling left, visit parent */
2014-05-16 13:22:48 -04:00
return pos - > parent ;
2012-11-09 09:12:29 -08:00
}
2014-05-16 13:22:52 -04:00
/**
* css_has_online_children - does a css have online children
* @css: the target css
*
* Returns %true if @css has any online children; otherwise, %false. This
* function can be called from any context but the caller is responsible
* for synchronizing against on/offlining as necessary.
*/
bool css_has_online_children ( struct cgroup_subsys_state * css )
2014-05-14 09:15:01 -04:00
{
2014-05-16 13:22:52 -04:00
struct cgroup_subsys_state * child ;
bool ret = false ;
2014-05-14 09:15:01 -04:00
rcu_read_lock ( ) ;
2014-05-16 13:22:52 -04:00
css_for_each_child ( child , css ) {
2014-06-12 14:31:31 +08:00
if ( child - > flags & CSS_ONLINE ) {
2014-05-16 13:22:52 -04:00
ret = true ;
break ;
2014-05-14 09:15:01 -04:00
}
}
rcu_read_unlock ( ) ;
2014-05-16 13:22:52 -04:00
return ret ;
2012-11-09 09:12:29 -08:00
}
2013-08-08 20:11:26 -04:00
/**
2015-10-15 16:41:52 -04:00
* css_task_iter_advance_css_set - advance a task itererator to the next css_set
2013-08-08 20:11:26 -04:00
* @it: the iterator to advance
*
* Advance @it to the next css_set to walk.
2013-08-08 20:11:26 -04:00
*/
2015-10-15 16:41:52 -04:00
static void css_task_iter_advance_css_set ( struct css_task_iter * it )
2013-08-08 20:11:26 -04:00
{
2014-04-23 11:13:15 -04:00
struct list_head * l = it - > cset_pos ;
2013-08-08 20:11:26 -04:00
struct cgrp_cset_link * link ;
struct css_set * cset ;
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
2013-08-08 20:11:26 -04:00
/* Advance to the next non-empty css_set */
do {
l = l - > next ;
2014-04-23 11:13:15 -04:00
if ( l = = it - > cset_head ) {
it - > cset_pos = NULL ;
2015-10-15 16:41:52 -04:00
it - > task_pos = NULL ;
2013-08-08 20:11:26 -04:00
return ;
}
2014-04-23 11:13:15 -04:00
if ( it - > ss ) {
cset = container_of ( l , struct css_set ,
e_cset_node [ it - > ss - > id ] ) ;
} else {
link = list_entry ( l , struct cgrp_cset_link , cset_link ) ;
cset = link - > cset ;
}
2015-10-15 16:41:49 -04:00
} while ( ! css_set_populated ( cset ) ) ;
2014-02-25 10:04:01 -05:00
2014-04-23 11:13:15 -04:00
it - > cset_pos = l ;
2014-02-25 10:04:01 -05:00
if ( ! list_empty ( & cset - > tasks ) )
2014-04-23 11:13:15 -04:00
it - > task_pos = cset - > tasks . next ;
2014-02-25 10:04:01 -05:00
else
2014-04-23 11:13:15 -04:00
it - > task_pos = cset - > mg_tasks . next ;
it - > tasks_head = & cset - > tasks ;
it - > mg_tasks_head = & cset - > mg_tasks ;
2015-10-15 16:41:52 -04:00
/*
* We don't keep css_sets locked across iteration steps and thus
* need to take steps to ensure that iteration can be resumed after
* the lock is re-acquired. Iteration is performed at two levels -
* css_sets and tasks in them.
*
* Once created, a css_set never leaves its cgroup lists, so a
* pinned css_set is guaranteed to stay put and we can resume
* iteration afterwards.
*
* Tasks may leave @cset across iteration steps. This is resolved
* by registering each iterator with the css_set currently being
* walked and making css_set_move_task() advance iterators whose
* next task is leaving.
*/
if ( it - > cur_cset ) {
list_del ( & it - > iters_node ) ;
put_css_set_locked ( it - > cur_cset ) ;
}
get_css_set ( cset ) ;
it - > cur_cset = cset ;
list_add ( & it - > iters_node , & cset - > task_iters ) ;
2013-08-08 20:11:26 -04:00
}
2015-10-15 16:41:52 -04:00
static void css_task_iter_advance ( struct css_task_iter * it )
{
struct list_head * l = it - > task_pos ;
2015-10-15 16:41:53 -04:00
lockdep_assert_held ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
WARN_ON_ONCE ( ! l ) ;
/*
* Advance iterator to find next entry. cset->tasks is consumed
* first and then ->mg_tasks. After ->mg_tasks, we move onto the
* next cset.
*/
l = l - > next ;
if ( l = = it - > tasks_head )
l = it - > mg_tasks_head - > next ;
if ( l = = it - > mg_tasks_head )
css_task_iter_advance_css_set ( it ) ;
else
it - > task_pos = l ;
}
2013-08-08 20:11:26 -04:00
/**
2013-08-08 20:11:26 -04:00
* css_task_iter_start - initiate task iteration
* @css: the css to walk tasks of
2013-08-08 20:11:26 -04:00
* @it: the task iterator to use
*
2013-08-08 20:11:26 -04:00
* Initiate iteration through the tasks of @css. The caller can call
* css_task_iter_next() to walk through the tasks until the function
* returns NULL. On completion of iteration, css_task_iter_end() must be
* called.
2013-08-08 20:11:26 -04:00
*/
2013-08-08 20:11:26 -04:00
void css_task_iter_start ( struct cgroup_subsys_state * css ,
struct css_task_iter * it )
2007-10-18 23:39:36 -07:00
{
2014-02-13 06:58:38 -05:00
/* no one should try to iterate before mounting cgroups */
WARN_ON_ONCE ( ! use_task_css_set_links ) ;
2008-02-07 00:14:42 -08:00
2015-10-15 16:41:52 -04:00
memset ( it , 0 , sizeof ( * it ) ) ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2013-08-08 20:11:26 -04:00
2014-04-23 11:13:15 -04:00
it - > ss = css - > ss ;
if ( it - > ss )
it - > cset_pos = & css - > cgroup - > e_csets [ css - > ss - > id ] ;
else
it - > cset_pos = & css - > cgroup - > cset_links ;
2014-04-23 11:13:15 -04:00
it - > cset_head = it - > cset_pos ;
2013-08-08 20:11:26 -04:00
2015-10-15 16:41:52 -04:00
css_task_iter_advance_css_set ( it ) ;
2015-10-15 16:41:52 -04:00
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:36 -07:00
}
2013-08-08 20:11:26 -04:00
/**
2013-08-08 20:11:26 -04:00
* css_task_iter_next - return the next task for the iterator
2013-08-08 20:11:26 -04:00
* @it: the task iterator being iterated
*
* The "next" function for task iteration. @it should have been
2013-08-08 20:11:26 -04:00
* initialized via css_task_iter_start(). Returns NULL when the iteration
* reaches the end.
2013-08-08 20:11:26 -04:00
*/
2013-08-08 20:11:26 -04:00
struct task_struct * css_task_iter_next ( struct css_task_iter * it )
2007-10-18 23:39:36 -07:00
{
2015-10-29 11:43:05 +09:00
if ( it - > cur_task ) {
2015-10-15 16:41:52 -04:00
put_task_struct ( it - > cur_task ) ;
2015-10-29 11:43:05 +09:00
it - > cur_task = NULL ;
}
2015-10-15 16:41:52 -04:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
2015-10-29 11:43:05 +09:00
if ( it - > task_pos ) {
it - > cur_task = list_entry ( it - > task_pos , struct task_struct ,
cg_list ) ;
get_task_struct ( it - > cur_task ) ;
css_task_iter_advance ( it ) ;
}
2015-10-15 16:41:52 -04:00
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
return it - > cur_task ;
2007-10-18 23:39:36 -07:00
}
2013-08-08 20:11:26 -04:00
/**
2013-08-08 20:11:26 -04:00
* css_task_iter_end - finish task iteration
2013-08-08 20:11:26 -04:00
* @it: the task iterator to finish
*
2013-08-08 20:11:26 -04:00
* Finish task iteration started by css_task_iter_start().
2013-08-08 20:11:26 -04:00
*/
2013-08-08 20:11:26 -04:00
void css_task_iter_end ( struct css_task_iter * it )
2007-10-18 23:39:36 -07:00
{
2015-10-15 16:41:52 -04:00
if ( it - > cur_cset ) {
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
list_del ( & it - > iters_node ) ;
put_css_set_locked ( it - > cur_cset ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
}
if ( it - > cur_task )
put_task_struct ( it - > cur_task ) ;
2013-04-07 09:29:50 -07:00
}
/**
* cgroup_trasnsfer_tasks - move tasks from one cgroup to another
* @to: cgroup to which the tasks will be moved
* @from: cgroup in which the tasks currently reside
2014-02-25 10:04:03 -05:00
*
* Locking rules between cgroup_post_fork() and the migration path
* guarantee that, if a task is forking while being migrated, the new child
* is guaranteed to be either visible in the source cgroup after the
* parent's migration is complete or put into the target cgroup. No task
* can slip out of migration through forking.
2013-04-07 09:29:50 -07:00
*/
int cgroup_transfer_tasks ( struct cgroup * to , struct cgroup * from )
{
2014-02-25 10:04:03 -05:00
LIST_HEAD ( preloaded_csets ) ;
struct cgrp_cset_link * link ;
2014-02-13 06:58:39 -05:00
struct css_task_iter it ;
struct task_struct * task ;
2014-02-25 10:04:03 -05:00
int ret ;
2014-02-13 06:58:39 -05:00
2016-03-08 11:51:25 -05:00
if ( ! cgroup_may_migrate_to ( to ) )
return - EBUSY ;
2014-02-25 10:04:03 -05:00
mutex_lock ( & cgroup_mutex ) ;
2016-07-15 06:35:51 -05:00
percpu_down_write ( & cgroup_threadgroup_rwsem ) ;
2014-02-25 10:04:03 -05:00
/* all tasks in @from are being moved, all csets are source */
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
list_for_each_entry ( link , & from - > cset_links , cset_link )
cgroup_migrate_add_src ( link - > cset , to , & preloaded_csets ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
2016-03-08 11:51:26 -05:00
ret = cgroup_migrate_prepare_dst ( & preloaded_csets ) ;
2014-02-25 10:04:03 -05:00
if ( ret )
goto out_err ;
/*
2016-01-09 23:33:06 +02:00
* Migrate tasks one-by-one until @from is empty. This fails iff
2014-02-25 10:04:03 -05:00
* ->can_attach() fails.
*/
2014-02-13 06:58:39 -05:00
do {
2014-05-14 09:15:00 -04:00
css_task_iter_start ( & from - > self , & it ) ;
2014-02-13 06:58:39 -05:00
task = css_task_iter_next ( & it ) ;
if ( task )
get_task_struct ( task ) ;
css_task_iter_end ( & it ) ;
if ( task ) {
2016-03-08 11:51:26 -05:00
ret = cgroup_migrate ( task , false , to - > root ) ;
2016-08-10 11:23:44 -04:00
if ( ! ret )
trace_cgroup_transfer_tasks ( to , task , false ) ;
2014-02-13 06:58:39 -05:00
put_task_struct ( task ) ;
}
} while ( task & & ! ret ) ;
2014-02-25 10:04:03 -05:00
out_err :
cgroup_migrate_finish ( & preloaded_csets ) ;
2016-07-15 06:35:51 -05:00
percpu_up_write ( & cgroup_threadgroup_rwsem ) ;
2014-02-25 10:04:03 -05:00
mutex_unlock ( & cgroup_mutex ) ;
2014-02-13 06:58:39 -05:00
return ret ;
2013-04-07 09:29:50 -07:00
}
2007-10-18 23:39:32 -07:00
/*
2009-09-23 15:56:26 -07:00
* Stuff for reading the 'tasks'/'procs' files.
2007-10-18 23:39:32 -07:00
*
* Reading this file can return large amounts of data if a cgroup has
* *lots* of attached tasks. So it may need several calls to read(),
* but we cannot guarantee that the information we produce is correct
* unless we produce it entirely atomically.
*
*/
2012-01-20 11:58:43 +08:00
/* which pidlist file are we talking about? */
enum cgroup_filetype {
CGROUP_FILE_PROCS ,
CGROUP_FILE_TASKS ,
} ;
/*
* A pidlist is a list of pids that virtually represents the contents of one
* of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
* a pair (one each for procs, tasks) for each pid namespace that's relevant
* to the cgroup.
*/
struct cgroup_pidlist {
/*
* used to find which pidlist is wanted. doesn't change as long as
* this particular list stays in the list.
*/
struct { enum cgroup_filetype type ; struct pid_namespace * ns ; } key ;
/* array of xids */
pid_t * list ;
/* how many elements the above list has */
int length ;
/* each of these stored in a list by its cgroup */
struct list_head links ;
/* pointer to the cgroup we belong to, for list removal purposes */
struct cgroup * owner ;
2013-11-29 10:42:58 -05:00
/* for delayed destruction */
struct delayed_work destroy_dwork ;
2012-01-20 11:58:43 +08:00
} ;
2009-09-23 15:56:28 -07:00
/*
* The following two functions "fix" the issue where there are more pids
* than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
* TODO: replace with a kernel-wide solution to this problem
*/
# define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void * pidlist_allocate ( int count )
{
if ( PIDLIST_TOO_LARGE ( count ) )
return vmalloc ( count * sizeof ( pid_t ) ) ;
else
return kmalloc ( count * sizeof ( pid_t ) , GFP_KERNEL ) ;
}
2013-11-29 10:42:58 -05:00
2009-09-23 15:56:28 -07:00
static void pidlist_free ( void * p )
{
2015-03-02 17:51:10 -05:00
kvfree ( p ) ;
2009-09-23 15:56:28 -07:00
}
2013-11-29 10:42:58 -05:00
/*
* Used to destroy all pidlists lingering waiting for destroy timer. None
* should be left afterwards.
*/
static void cgroup_pidlist_destroy_all ( struct cgroup * cgrp )
{
struct cgroup_pidlist * l , * tmp_l ;
mutex_lock ( & cgrp - > pidlist_mutex ) ;
list_for_each_entry_safe ( l , tmp_l , & cgrp - > pidlists , links )
mod_delayed_work ( cgroup_pidlist_destroy_wq , & l - > destroy_dwork , 0 ) ;
mutex_unlock ( & cgrp - > pidlist_mutex ) ;
flush_workqueue ( cgroup_pidlist_destroy_wq ) ;
BUG_ON ( ! list_empty ( & cgrp - > pidlists ) ) ;
}
static void cgroup_pidlist_destroy_work_fn ( struct work_struct * work )
{
struct delayed_work * dwork = to_delayed_work ( work ) ;
struct cgroup_pidlist * l = container_of ( dwork , struct cgroup_pidlist ,
destroy_dwork ) ;
struct cgroup_pidlist * tofree = NULL ;
mutex_lock ( & l - > owner - > pidlist_mutex ) ;
/*
2013-11-29 10:42:59 -05:00
* Destroy iff we didn't get queued again. The state won't change
* as destroy_dwork can only be queued while locked.
2013-11-29 10:42:58 -05:00
*/
2013-11-29 10:42:59 -05:00
if ( ! delayed_work_pending ( dwork ) ) {
2013-11-29 10:42:58 -05:00
list_del ( & l - > links ) ;
pidlist_free ( l - > list ) ;
put_pid_ns ( l - > key . ns ) ;
tofree = l ;
}
mutex_unlock ( & l - > owner - > pidlist_mutex ) ;
kfree ( tofree ) ;
}
2007-10-18 23:39:32 -07:00
/*
2009-09-23 15:56:26 -07:00
* pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
2013-03-12 15:36:00 -07:00
* Returns the number of unique elements.
2007-10-18 23:39:32 -07:00
*/
2013-03-12 15:36:00 -07:00
static int pidlist_uniq ( pid_t * list , int length )
2007-10-18 23:39:32 -07:00
{
2009-09-23 15:56:26 -07:00
int src , dest = 1 ;
/*
* we presume the 0th element is unique, so i starts at 1. trivial
* edge cases first; no work needs to be done for either
*/
if ( length = = 0 | | length = = 1 )
return length ;
/* src and dest walk down the list; dest counts unique elements */
for ( src = 1 ; src < length ; src + + ) {
/* find next unique element */
while ( list [ src ] = = list [ src - 1 ] ) {
src + + ;
if ( src = = length )
goto after ;
}
/* dest always points to where the next unique element goes */
list [ dest ] = list [ src ] ;
dest + + ;
}
after :
return dest ;
}
2013-11-29 10:42:59 -05:00
/*
* The two pid files - task and cgroup.procs - guaranteed that the result
* is sorted, which forced this whole pidlist fiasco. As pid order is
* different per namespace, each namespace needs differently sorted list,
* making it impossible to use, for example, single rbtree of member tasks
* sorted by task pointer. As pidlists can be fairly large, allocating one
* per open file is dangerous, so cgroup had to implement shared pool of
* pidlists keyed by cgroup and namespace.
*
* All this extra complexity was caused by the original implementation
* committing to an entirely unnecessary property. In the long term, we
2014-07-09 10:08:08 -04:00
* want to do away with it. Explicitly scramble sort order if on the
* default hierarchy so that no such expectation exists in the new
* interface.
2013-11-29 10:42:59 -05:00
*
* Scrambling is done by swapping every two consecutive bits, which is
* non-identity one-to-one mapping which disturbs sort order sufficiently.
*/
static pid_t pid_fry ( pid_t pid )
{
unsigned a = pid & 0x55555555 ;
unsigned b = pid & 0xAAAAAAAA ;
return ( a < < 1 ) | ( b > > 1 ) ;
}
static pid_t cgroup_pid_fry ( struct cgroup * cgrp , pid_t pid )
{
2014-07-09 10:08:08 -04:00
if ( cgroup_on_dfl ( cgrp ) )
2013-11-29 10:42:59 -05:00
return pid_fry ( pid ) ;
else
return pid ;
}
2009-09-23 15:56:26 -07:00
static int cmppid ( const void * a , const void * b )
{
return * ( pid_t * ) a - * ( pid_t * ) b ;
}
2013-11-29 10:42:59 -05:00
static int fried_cmppid ( const void * a , const void * b )
{
return pid_fry ( * ( pid_t * ) a ) - pid_fry ( * ( pid_t * ) b ) ;
}
2009-09-23 15:56:27 -07:00
static struct cgroup_pidlist * cgroup_pidlist_find ( struct cgroup * cgrp ,
enum cgroup_filetype type )
{
struct cgroup_pidlist * l ;
/* don't need task_nsproxy() if we're looking at ourself */
2010-03-02 14:51:53 -08:00
struct pid_namespace * ns = task_active_pid_ns ( current ) ;
2010-03-10 15:22:12 -08:00
2013-11-29 10:42:59 -05:00
lockdep_assert_held ( & cgrp - > pidlist_mutex ) ;
list_for_each_entry ( l , & cgrp - > pidlists , links )
if ( l - > key . type = = type & & l - > key . ns = = ns )
2009-09-23 15:56:27 -07:00
return l ;
2013-11-29 10:42:59 -05:00
return NULL ;
}
2009-09-23 15:56:27 -07:00
/*
* find the appropriate pidlist for our purpose (given procs vs tasks)
* returns with the lock on that pidlist already held, and takes care
* of the use count, or returns NULL with no locks held if we're out of
* memory.
*/
2013-11-29 10:42:59 -05:00
static struct cgroup_pidlist * cgroup_pidlist_find_create ( struct cgroup * cgrp ,
enum cgroup_filetype type )
2009-09-23 15:56:27 -07:00
{
struct cgroup_pidlist * l ;
2010-03-10 15:22:12 -08:00
2013-11-29 10:42:59 -05:00
lockdep_assert_held ( & cgrp - > pidlist_mutex ) ;
l = cgroup_pidlist_find ( cgrp , type ) ;
if ( l )
return l ;
2009-09-23 15:56:27 -07:00
/* entry not found; create a new one */
2013-06-12 21:04:51 -07:00
l = kzalloc ( sizeof ( struct cgroup_pidlist ) , GFP_KERNEL ) ;
2013-11-29 10:42:59 -05:00
if ( ! l )
2009-09-23 15:56:27 -07:00
return l ;
2013-11-29 10:42:59 -05:00
2013-11-29 10:42:58 -05:00
INIT_DELAYED_WORK ( & l - > destroy_dwork , cgroup_pidlist_destroy_work_fn ) ;
2009-09-23 15:56:27 -07:00
l - > key . type = type ;
2013-11-29 10:42:59 -05:00
/* don't need task_nsproxy() if we're looking at ourself */
l - > key . ns = get_pid_ns ( task_active_pid_ns ( current ) ) ;
2009-09-23 15:56:27 -07:00
l - > owner = cgrp ;
list_add ( & l - > links , & cgrp - > pidlists ) ;
return l ;
}
2009-09-23 15:56:26 -07:00
/*
* Load a cgroup's pidarray with either procs' tgids or tasks' pids
*/
2009-09-23 15:56:27 -07:00
static int pidlist_array_load ( struct cgroup * cgrp , enum cgroup_filetype type ,
struct cgroup_pidlist * * lp )
2009-09-23 15:56:26 -07:00
{
pid_t * array ;
int length ;
int pid , n = 0 ; /* used for populating the array */
2013-08-08 20:11:26 -04:00
struct css_task_iter it ;
2007-10-18 23:39:36 -07:00
struct task_struct * tsk ;
2009-09-23 15:56:26 -07:00
struct cgroup_pidlist * l ;
2013-11-29 10:42:59 -05:00
lockdep_assert_held ( & cgrp - > pidlist_mutex ) ;
2009-09-23 15:56:26 -07:00
/*
* If cgroup gets more users after we read count, we won't have
* enough space - tough. This race is indistinguishable to the
* caller from the case that the additional cgroup users didn't
* show up until sometime later on.
*/
length = cgroup_task_count ( cgrp ) ;
2009-09-23 15:56:28 -07:00
array = pidlist_allocate ( length ) ;
2009-09-23 15:56:26 -07:00
if ( ! array )
return - ENOMEM ;
/* now, populate the array */
2014-05-14 09:15:00 -04:00
css_task_iter_start ( & cgrp - > self , & it ) ;
2013-08-08 20:11:26 -04:00
while ( ( tsk = css_task_iter_next ( & it ) ) ) {
2009-09-23 15:56:26 -07:00
if ( unlikely ( n = = length ) )
2007-10-18 23:39:36 -07:00
break ;
2009-09-23 15:56:26 -07:00
/* get tgid or pid for procs or tasks file respectively */
2009-09-23 15:56:27 -07:00
if ( type = = CGROUP_FILE_PROCS )
pid = task_tgid_vnr ( tsk ) ;
else
pid = task_pid_vnr ( tsk ) ;
2009-09-23 15:56:26 -07:00
if ( pid > 0 ) /* make sure to only use valid results */
array [ n + + ] = pid ;
2007-10-18 23:39:36 -07:00
}
2013-08-08 20:11:26 -04:00
css_task_iter_end ( & it ) ;
2009-09-23 15:56:26 -07:00
length = n ;
/* now sort & (if procs) strip out duplicates */
2014-07-09 10:08:08 -04:00
if ( cgroup_on_dfl ( cgrp ) )
2013-11-29 10:42:59 -05:00
sort ( array , length , sizeof ( pid_t ) , fried_cmppid , NULL ) ;
else
sort ( array , length , sizeof ( pid_t ) , cmppid , NULL ) ;
2009-09-23 15:56:27 -07:00
if ( type = = CGROUP_FILE_PROCS )
2013-03-12 15:36:00 -07:00
length = pidlist_uniq ( array , length ) ;
2013-11-29 10:42:59 -05:00
l = cgroup_pidlist_find_create ( cgrp , type ) ;
2009-09-23 15:56:27 -07:00
if ( ! l ) {
2009-09-23 15:56:28 -07:00
pidlist_free ( array ) ;
2009-09-23 15:56:27 -07:00
return - ENOMEM ;
2009-09-23 15:56:26 -07:00
}
2013-11-29 10:42:59 -05:00
/* store array, freeing old if necessary */
2009-09-23 15:56:28 -07:00
pidlist_free ( l - > list ) ;
2009-09-23 15:56:26 -07:00
l - > list = array ;
l - > length = length ;
2009-09-23 15:56:27 -07:00
* lp = l ;
2009-09-23 15:56:26 -07:00
return 0 ;
2007-10-18 23:39:32 -07:00
}
2007-10-18 23:39:44 -07:00
/**
2008-02-23 15:24:09 -08:00
* cgroupstats_build - build and fill cgroupstats
2007-10-18 23:39:44 -07:00
* @stats: cgroupstats to fill information into
* @dentry: A dentry entry belonging to the cgroup for which stats have
* been requested.
2008-02-23 15:24:09 -08:00
*
* Build and fill cgroupstats so that taskstats can export it to user
* space.
2007-10-18 23:39:44 -07:00
*/
int cgroupstats_build ( struct cgroupstats * stats , struct dentry * dentry )
{
2014-02-11 11:52:49 -05:00
struct kernfs_node * kn = kernfs_node_from_dentry ( dentry ) ;
2007-10-18 23:40:44 -07:00
struct cgroup * cgrp ;
2013-08-08 20:11:26 -04:00
struct css_task_iter it ;
2007-10-18 23:39:44 -07:00
struct task_struct * tsk ;
2008-11-19 15:36:48 -08:00
2014-02-11 11:52:49 -05:00
/* it should be kernfs_node belonging to cgroupfs and is a directory */
if ( dentry - > d_sb - > s_type ! = & cgroup_fs_type | | ! kn | |
kernfs_type ( kn ) ! = KERNFS_DIR )
return - EINVAL ;
2007-10-18 23:39:44 -07:00
2014-02-14 16:54:28 +08:00
mutex_lock ( & cgroup_mutex ) ;
2014-02-11 11:52:49 -05:00
/*
* We aren't being called from kernfs and there's no guarantee on
2014-05-13 12:11:01 -04:00
* @kn->priv's validity. For this and css_tryget_online_from_dir(),
2014-02-11 11:52:49 -05:00
* @kn->priv is RCU safe. Let's do the RCU dancing.
*/
rcu_read_lock ( ) ;
cgrp = rcu_dereference ( kn - > priv ) ;
2014-02-14 16:54:28 +08:00
if ( ! cgrp | | cgroup_is_dead ( cgrp ) ) {
2014-02-11 11:52:49 -05:00
rcu_read_unlock ( ) ;
2014-02-14 16:54:28 +08:00
mutex_unlock ( & cgroup_mutex ) ;
2014-02-11 11:52:49 -05:00
return - ENOENT ;
}
2014-02-14 16:54:28 +08:00
rcu_read_unlock ( ) ;
2007-10-18 23:39:44 -07:00
2014-05-14 09:15:00 -04:00
css_task_iter_start ( & cgrp - > self , & it ) ;
2013-08-08 20:11:26 -04:00
while ( ( tsk = css_task_iter_next ( & it ) ) ) {
2007-10-18 23:39:44 -07:00
switch ( tsk - > state ) {
case TASK_RUNNING :
stats - > nr_running + + ;
break ;
case TASK_INTERRUPTIBLE :
stats - > nr_sleeping + + ;
break ;
case TASK_UNINTERRUPTIBLE :
stats - > nr_uninterruptible + + ;
break ;
case TASK_STOPPED :
stats - > nr_stopped + + ;
break ;
default :
if ( delayacct_is_task_waiting_on_io ( tsk ) )
stats - > nr_io_wait + + ;
break ;
}
}
2013-08-08 20:11:26 -04:00
css_task_iter_end ( & it ) ;
2007-10-18 23:39:44 -07:00
2014-02-14 16:54:28 +08:00
mutex_unlock ( & cgroup_mutex ) ;
2014-02-11 11:52:49 -05:00
return 0 ;
2007-10-18 23:39:44 -07:00
}
2009-09-23 15:56:25 -07:00
2008-10-18 20:28:04 -07:00
/*
2009-09-23 15:56:26 -07:00
* seq_file methods for the tasks/procs files. The seq_file position is the
2008-10-18 20:28:04 -07:00
* next pid to display; the seq_file iterator is a pointer to the pid
2009-09-23 15:56:26 -07:00
* in the cgroup->l->list array.
2008-10-18 20:28:04 -07:00
*/
2009-09-23 15:56:26 -07:00
static void * cgroup_pidlist_start ( struct seq_file * s , loff_t * pos )
2008-10-18 20:28:04 -07:00
{
/*
* Initially we receive a position value that corresponds to
* one more than the last pid shown (or 0 on the first call or
* after a seek to the start). Use a binary-search to find the
* next pid to display, if any
*/
2014-02-11 11:52:49 -05:00
struct kernfs_open_file * of = s - > private ;
2013-12-05 12:28:04 -05:00
struct cgroup * cgrp = seq_css ( s ) - > cgroup ;
2013-11-29 10:42:59 -05:00
struct cgroup_pidlist * l ;
2013-12-05 12:28:04 -05:00
enum cgroup_filetype type = seq_cft ( s ) - > private ;
2008-10-18 20:28:04 -07:00
int index = 0 , pid = * pos ;
2013-11-29 10:42:59 -05:00
int * iter , ret ;
mutex_lock ( & cgrp - > pidlist_mutex ) ;
/*
2013-12-05 12:28:04 -05:00
* !NULL @of->priv indicates that this isn't the first start()
2013-11-29 10:42:59 -05:00
* after open. If the matching pidlist is around, we can use that.
2013-12-05 12:28:04 -05:00
* Look for it. Note that @of->priv can't be used directly. It
2013-11-29 10:42:59 -05:00
* could already have been destroyed.
*/
2013-12-05 12:28:04 -05:00
if ( of - > priv )
of - > priv = cgroup_pidlist_find ( cgrp , type ) ;
2013-11-29 10:42:59 -05:00
/*
* Either this is the first start() after open or the matching
* pidlist has been destroyed inbetween. Create a new one.
*/
2013-12-05 12:28:04 -05:00
if ( ! of - > priv ) {
ret = pidlist_array_load ( cgrp , type ,
( struct cgroup_pidlist * * ) & of - > priv ) ;
2013-11-29 10:42:59 -05:00
if ( ret )
return ERR_PTR ( ret ) ;
}
2013-12-05 12:28:04 -05:00
l = of - > priv ;
2008-10-18 20:28:04 -07:00
if ( pid ) {
2009-09-23 15:56:26 -07:00
int end = l - > length ;
2008-10-21 16:11:20 +11:00
2008-10-18 20:28:04 -07:00
while ( index < end ) {
int mid = ( index + end ) / 2 ;
2013-11-29 10:42:59 -05:00
if ( cgroup_pid_fry ( cgrp , l - > list [ mid ] ) = = pid ) {
2008-10-18 20:28:04 -07:00
index = mid ;
break ;
2013-11-29 10:42:59 -05:00
} else if ( cgroup_pid_fry ( cgrp , l - > list [ mid ] ) < = pid )
2008-10-18 20:28:04 -07:00
index = mid + 1 ;
else
end = mid ;
}
}
/* If we're off the end of the array, we're done */
2009-09-23 15:56:26 -07:00
if ( index > = l - > length )
2008-10-18 20:28:04 -07:00
return NULL ;
/* Update the abstract position to be the actual pid that we found */
2009-09-23 15:56:26 -07:00
iter = l - > list + index ;
2013-11-29 10:42:59 -05:00
* pos = cgroup_pid_fry ( cgrp , * iter ) ;
2008-10-18 20:28:04 -07:00
return iter ;
2007-10-18 23:39:32 -07:00
}
2009-09-23 15:56:26 -07:00
static void cgroup_pidlist_stop ( struct seq_file * s , void * v )
2008-10-18 20:28:04 -07:00
{
2014-02-11 11:52:49 -05:00
struct kernfs_open_file * of = s - > private ;
2013-12-05 12:28:04 -05:00
struct cgroup_pidlist * l = of - > priv ;
2013-11-29 10:42:58 -05:00
2013-12-05 12:28:04 -05:00
if ( l )
mod_delayed_work ( cgroup_pidlist_destroy_wq , & l - > destroy_dwork ,
2013-11-29 10:42:59 -05:00
CGROUP_PIDLIST_DESTROY_DELAY ) ;
2013-12-05 12:28:04 -05:00
mutex_unlock ( & seq_css ( s ) - > cgroup - > pidlist_mutex ) ;
2008-10-18 20:28:04 -07:00
}
2009-09-23 15:56:26 -07:00
static void * cgroup_pidlist_next ( struct seq_file * s , void * v , loff_t * pos )
2008-10-18 20:28:04 -07:00
{
2014-02-11 11:52:49 -05:00
struct kernfs_open_file * of = s - > private ;
2013-12-05 12:28:04 -05:00
struct cgroup_pidlist * l = of - > priv ;
2009-09-23 15:56:26 -07:00
pid_t * p = v ;
pid_t * end = l - > list + l - > length ;
2008-10-18 20:28:04 -07:00
/*
* Advance to the next pid in the array. If this goes off the
* end, we're done
*/
p + + ;
if ( p > = end ) {
return NULL ;
} else {
2013-12-05 12:28:04 -05:00
* pos = cgroup_pid_fry ( seq_css ( s ) - > cgroup , * p ) ;
2008-10-18 20:28:04 -07:00
return p ;
}
}
2009-09-23 15:56:26 -07:00
static int cgroup_pidlist_show ( struct seq_file * s , void * v )
2008-10-18 20:28:04 -07:00
{
2015-04-15 16:18:20 -07:00
seq_printf ( s , " %d \n " , * ( int * ) v ) ;
return 0 ;
2008-10-18 20:28:04 -07:00
}
2013-08-08 20:11:24 -04:00
static u64 cgroup_read_notify_on_release ( struct cgroup_subsys_state * css ,
struct cftype * cft )
2007-10-18 23:39:38 -07:00
{
2013-08-08 20:11:24 -04:00
return notify_on_release ( css - > cgroup ) ;
2007-10-18 23:39:38 -07:00
}
2013-08-08 20:11:24 -04:00
static int cgroup_write_notify_on_release ( struct cgroup_subsys_state * css ,
struct cftype * cft , u64 val )
2008-07-25 01:47:01 -07:00
{
if ( val )
2013-08-08 20:11:24 -04:00
set_bit ( CGRP_NOTIFY_ON_RELEASE , & css - > cgroup - > flags ) ;
2008-07-25 01:47:01 -07:00
else
2013-08-08 20:11:24 -04:00
clear_bit ( CGRP_NOTIFY_ON_RELEASE , & css - > cgroup - > flags ) ;
2008-07-25 01:47:01 -07:00
return 0 ;
}
2013-08-08 20:11:24 -04:00
static u64 cgroup_clone_children_read ( struct cgroup_subsys_state * css ,
struct cftype * cft )
2010-10-27 15:33:35 -07:00
{
2013-08-08 20:11:24 -04:00
return test_bit ( CGRP_CPUSET_CLONE_CHILDREN , & css - > cgroup - > flags ) ;
2010-10-27 15:33:35 -07:00
}
2013-08-08 20:11:24 -04:00
static int cgroup_clone_children_write ( struct cgroup_subsys_state * css ,
struct cftype * cft , u64 val )
2010-10-27 15:33:35 -07:00
{
if ( val )
2013-08-08 20:11:24 -04:00
set_bit ( CGRP_CPUSET_CLONE_CHILDREN , & css - > cgroup - > flags ) ;
2010-10-27 15:33:35 -07:00
else
2013-08-08 20:11:24 -04:00
clear_bit ( CGRP_CPUSET_CLONE_CHILDREN , & css - > cgroup - > flags ) ;
2010-10-27 15:33:35 -07:00
return 0 ;
}
2014-07-15 11:05:09 -04:00
/* cgroup core interface files for the default hierarchy */
static struct cftype cgroup_dfl_base_files [ ] = {
{
. name = " cgroup.procs " ,
2015-09-18 17:54:23 -04:00
. file_offset = offsetof ( struct cgroup , procs_file ) ,
2014-07-15 11:05:09 -04:00
. seq_start = cgroup_pidlist_start ,
. seq_next = cgroup_pidlist_next ,
. seq_stop = cgroup_pidlist_stop ,
. seq_show = cgroup_pidlist_show ,
. private = CGROUP_FILE_PROCS ,
. write = cgroup_procs_write ,
} ,
{
. name = " cgroup.controllers " ,
. seq_show = cgroup_controllers_show ,
} ,
{
. name = " cgroup.subtree_control " ,
. seq_show = cgroup_subtree_control_show ,
. write = cgroup_subtree_control_write ,
} ,
{
2015-09-18 17:54:22 -04:00
. name = " cgroup.events " ,
2014-07-15 11:05:09 -04:00
. flags = CFTYPE_NOT_ON_ROOT ,
2015-09-18 17:54:23 -04:00
. file_offset = offsetof ( struct cgroup , events_file ) ,
2015-09-18 17:54:22 -04:00
. seq_show = cgroup_events_show ,
2014-07-15 11:05:09 -04:00
} ,
{ } /* terminate */
} ;
/* cgroup core interface files for the legacy hierarchies */
static struct cftype cgroup_legacy_base_files [ ] = {
2007-10-18 23:39:38 -07:00
{
2013-06-03 19:14:34 -07:00
. name = " cgroup.procs " ,
2013-12-05 12:28:04 -05:00
. seq_start = cgroup_pidlist_start ,
. seq_next = cgroup_pidlist_next ,
. seq_stop = cgroup_pidlist_stop ,
. seq_show = cgroup_pidlist_show ,
2013-12-05 12:28:04 -05:00
. private = CGROUP_FILE_PROCS ,
2014-05-13 12:16:22 -04:00
. write = cgroup_procs_write ,
2009-09-23 15:56:26 -07:00
} ,
2010-10-27 15:33:35 -07:00
{
. name = " cgroup.clone_children " ,
. read_u64 = cgroup_clone_children_read ,
. write_u64 = cgroup_clone_children_write ,
} ,
2013-04-14 20:15:26 -07:00
{
. name = " cgroup.sane_behavior " ,
. flags = CFTYPE_ONLY_ON_ROOT ,
2013-12-05 12:28:04 -05:00
. seq_show = cgroup_sane_behavior_show ,
2013-04-14 20:15:26 -07:00
} ,
2013-06-03 19:14:34 -07:00
{
. name = " tasks " ,
2013-12-05 12:28:04 -05:00
. seq_start = cgroup_pidlist_start ,
. seq_next = cgroup_pidlist_next ,
. seq_stop = cgroup_pidlist_stop ,
. seq_show = cgroup_pidlist_show ,
2013-12-05 12:28:04 -05:00
. private = CGROUP_FILE_TASKS ,
2014-05-13 12:16:22 -04:00
. write = cgroup_tasks_write ,
2013-06-03 19:14:34 -07:00
} ,
{
. name = " notify_on_release " ,
. read_u64 = cgroup_read_notify_on_release ,
. write_u64 = cgroup_write_notify_on_release ,
} ,
2012-04-01 12:09:55 -07:00
{
. name = " release_agent " ,
2014-07-15 11:05:09 -04:00
. flags = CFTYPE_ONLY_ON_ROOT ,
2013-12-05 12:28:04 -05:00
. seq_show = cgroup_release_agent_show ,
2014-05-13 12:16:21 -04:00
. write = cgroup_release_agent_write ,
2014-02-11 11:52:48 -05:00
. max_write_len = PATH_MAX - 1 ,
2012-04-01 12:09:55 -07:00
} ,
2012-04-01 12:09:55 -07:00
{ } /* terminate */
2007-10-18 23:39:32 -07:00
} ;
2013-08-13 20:22:51 -04:00
/*
* css destruction is four-stage process.
*
* 1. Destruction starts. Killing of the percpu_ref is initiated.
* Implemented in kill_css().
*
* 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
2014-05-13 12:11:01 -04:00
* and thus css_tryget_online() is guaranteed to fail, the css can be
* offlined by invoking offline_css(). After offlining, the base ref is
* put. Implemented in css_killed_work_fn().
2013-08-13 20:22:51 -04:00
*
* 3. When the percpu_ref reaches zero, the only possible remaining
* accessors are inside RCU read sections. css_release() schedules the
* RCU callback.
*
* 4. After the grace period, the css can be freed. Implemented in
* css_free_work_fn().
*
* It is actually hairier because both step 2 and 4 require process context
* and thus involve punting to css->destroy_work adding two additional
* steps to the already complex sequence.
*/
2013-08-13 11:01:54 -04:00
static void css_free_work_fn ( struct work_struct * work )
2012-04-01 12:09:56 -07:00
{
struct cgroup_subsys_state * css =
2013-08-13 11:01:54 -04:00
container_of ( work , struct cgroup_subsys_state , destroy_work ) ;
2015-02-12 14:59:26 -08:00
struct cgroup_subsys * ss = css - > ss ;
2013-08-13 20:22:51 -04:00
struct cgroup * cgrp = css - > cgroup ;
2012-04-01 12:09:56 -07:00
2014-06-28 08:10:14 -04:00
percpu_ref_exit ( & css - > refcnt ) ;
2015-02-12 14:59:26 -08:00
if ( ss ) {
2014-05-14 09:15:02 -04:00
/* css free path */
2016-01-21 15:32:15 -05:00
struct cgroup_subsys_state * parent = css - > parent ;
2015-02-12 14:59:26 -08:00
int id = css - > id ;
ss - > css_free ( css ) ;
cgroup_idr_remove ( & ss - > css_idr , id ) ;
2014-05-14 09:15:02 -04:00
cgroup_put ( cgrp ) ;
2016-01-21 15:32:15 -05:00
if ( parent )
css_put ( parent ) ;
2014-05-14 09:15:02 -04:00
} else {
/* cgroup free path */
atomic_dec ( & cgrp - > root - > nr_cgrps ) ;
cgroup_pidlist_destroy_all ( cgrp ) ;
2014-09-18 16:06:19 +08:00
cancel_work_sync ( & cgrp - > release_agent_work ) ;
2014-05-14 09:15:02 -04:00
2014-05-16 13:22:48 -04:00
if ( cgroup_parent ( cgrp ) ) {
2014-05-14 09:15:02 -04:00
/*
* We get a ref to the parent, and put the ref when
* this cgroup is being freed, so it's guaranteed
* that the parent won't be destroyed before its
* children.
*/
2014-05-16 13:22:48 -04:00
cgroup_put ( cgroup_parent ( cgrp ) ) ;
2014-05-14 09:15:02 -04:00
kernfs_put ( cgrp - > kn ) ;
kfree ( cgrp ) ;
} else {
/*
* This is root cgroup's refcnt reaching zero,
* which indicates that the root should be
* released.
*/
cgroup_destroy_root ( cgrp - > root ) ;
}
}
2013-08-13 20:22:51 -04:00
}
static void css_free_rcu_fn ( struct rcu_head * rcu_head )
{
struct cgroup_subsys_state * css =
container_of ( rcu_head , struct cgroup_subsys_state , rcu_head ) ;
INIT_WORK ( & css - > destroy_work , css_free_work_fn ) ;
2013-11-22 17:14:39 -05:00
queue_work ( cgroup_destroy_wq , & css - > destroy_work ) ;
2012-04-01 12:09:56 -07:00
}
2014-05-14 09:15:02 -04:00
static void css_release_work_fn ( struct work_struct * work )
2013-06-13 19:39:16 -07:00
{
struct cgroup_subsys_state * css =
2014-05-14 09:15:02 -04:00
container_of ( work , struct cgroup_subsys_state , destroy_work ) ;
2014-05-04 15:09:14 -04:00
struct cgroup_subsys * ss = css - > ss ;
2014-05-14 09:15:02 -04:00
struct cgroup * cgrp = css - > cgroup ;
2014-05-04 15:09:14 -04:00
2014-05-16 13:22:49 -04:00
mutex_lock ( & cgroup_mutex ) ;
2014-05-16 13:22:49 -04:00
css - > flags | = CSS_RELEASED ;
2014-05-16 13:22:49 -04:00
list_del_rcu ( & css - > sibling ) ;
2014-05-14 09:15:02 -04:00
if ( ss ) {
/* css release path */
2015-02-12 14:59:26 -08:00
cgroup_idr_replace ( & ss - > css_idr , NULL , css - > id ) ;
2014-11-18 02:49:51 -05:00
if ( ss - > css_released )
ss - > css_released ( css ) ;
2014-05-14 09:15:02 -04:00
} else {
/* cgroup release path */
2016-08-10 11:23:44 -04:00
trace_cgroup_release ( cgrp ) ;
2014-05-14 09:15:02 -04:00
cgroup_idr_remove ( & cgrp - > root - > cgroup_idr , cgrp - > id ) ;
cgrp - > id = - 1 ;
2014-09-04 14:43:07 +08:00
/*
* There are two control paths which try to determine
* cgroup from dentry without going through kernfs -
* cgroupstats_build() and css_tryget_online_from_dir().
* Those are supported by RCU protecting clearing of
* cgrp->kn->priv backpointer.
*/
2016-03-03 09:57:58 -05:00
if ( cgrp - > kn )
RCU_INIT_POINTER ( * ( void __rcu __force * * ) & cgrp - > kn - > priv ,
NULL ) ;
2016-11-23 16:52:26 +01:00
cgroup_bpf_put ( cgrp ) ;
2014-05-14 09:15:02 -04:00
}
2013-06-13 19:39:16 -07:00
2014-05-16 13:22:49 -04:00
mutex_unlock ( & cgroup_mutex ) ;
2013-08-13 20:22:51 -04:00
call_rcu ( & css - > rcu_head , css_free_rcu_fn ) ;
2013-06-13 19:39:16 -07:00
}
static void css_release ( struct percpu_ref * ref )
{
struct cgroup_subsys_state * css =
container_of ( ref , struct cgroup_subsys_state , refcnt ) ;
2014-05-14 09:15:02 -04:00
INIT_WORK ( & css - > destroy_work , css_release_work_fn ) ;
queue_work ( cgroup_destroy_wq , & css - > destroy_work ) ;
2013-06-13 19:39:16 -07:00
}
2014-05-04 15:09:14 -04:00
static void init_and_link_css ( struct cgroup_subsys_state * css ,
struct cgroup_subsys * ss , struct cgroup * cgrp )
2007-10-18 23:39:30 -07:00
{
2014-05-16 13:22:49 -04:00
lockdep_assert_held ( & cgroup_mutex ) ;
2014-05-04 15:09:14 -04:00
cgroup_get ( cgrp ) ;
2014-05-16 13:22:48 -04:00
memset ( css , 0 , sizeof ( * css ) ) ;
2007-10-18 23:40:44 -07:00
css - > cgroup = cgrp ;
2013-08-08 20:11:22 -04:00
css - > ss = ss ;
2016-05-26 15:42:13 -04:00
css - > id = - 1 ;
2014-05-16 13:22:48 -04:00
INIT_LIST_HEAD ( & css - > sibling ) ;
INIT_LIST_HEAD ( & css - > children ) ;
2014-05-16 13:22:49 -04:00
css - > serial_nr = css_serial_nr_next + + ;
2016-01-21 15:31:11 -05:00
atomic_set ( & css - > online_cnt , 0 ) ;
2012-04-01 12:09:56 -07:00
2014-05-16 13:22:48 -04:00
if ( cgroup_parent ( cgrp ) ) {
css - > parent = cgroup_css ( cgroup_parent ( cgrp ) , ss ) ;
2014-05-04 15:09:14 -04:00
css_get ( css - > parent ) ;
}
2013-08-13 11:01:54 -04:00
2013-08-26 18:40:56 -04:00
BUG_ON ( cgroup_css ( cgrp , ss ) ) ;
2007-10-18 23:39:30 -07:00
}
2013-07-31 16:16:40 +08:00
/* invoke ->css_online() on a new CSS and mark it online if successful */
2013-08-13 11:01:55 -04:00
static int online_css ( struct cgroup_subsys_state * css )
2012-11-19 08:13:37 -08:00
{
2013-08-13 11:01:55 -04:00
struct cgroup_subsys * ss = css - > ss ;
2012-11-19 08:13:38 -08:00
int ret = 0 ;
2012-11-19 08:13:37 -08:00
lockdep_assert_held ( & cgroup_mutex ) ;
2012-11-19 08:13:38 -08:00
if ( ss - > css_online )
2013-08-08 20:11:23 -04:00
ret = ss - > css_online ( css ) ;
2013-08-13 20:22:50 -04:00
if ( ! ret ) {
2013-08-08 20:11:23 -04:00
css - > flags | = CSS_ONLINE ;
2014-02-08 10:36:58 -05:00
rcu_assign_pointer ( css - > cgroup - > subsys [ ss - > id ] , css ) ;
2016-01-21 15:31:11 -05:00
atomic_inc ( & css - > online_cnt ) ;
if ( css - > parent )
atomic_inc ( & css - > parent - > online_cnt ) ;
2013-08-13 20:22:50 -04:00
}
2012-11-19 08:13:38 -08:00
return ret ;
2012-11-19 08:13:37 -08:00
}
2013-07-31 16:16:40 +08:00
/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
2013-08-13 11:01:55 -04:00
static void offline_css ( struct cgroup_subsys_state * css )
2012-11-19 08:13:37 -08:00
{
2013-08-13 11:01:55 -04:00
struct cgroup_subsys * ss = css - > ss ;
2012-11-19 08:13:37 -08:00
lockdep_assert_held ( & cgroup_mutex ) ;
if ( ! ( css - > flags & CSS_ONLINE ) )
return ;
2016-03-01 19:56:30 +03:00
if ( ss - > css_reset )
ss - > css_reset ( css ) ;
2013-03-12 15:35:59 -07:00
if ( ss - > css_offline )
2013-08-08 20:11:23 -04:00
ss - > css_offline ( css ) ;
2012-11-19 08:13:37 -08:00
2013-08-08 20:11:23 -04:00
css - > flags & = ~ CSS_ONLINE ;
2014-04-23 11:13:15 -04:00
RCU_INIT_POINTER ( css - > cgroup - > subsys [ ss - > id ] , NULL ) ;
2014-04-23 11:13:16 -04:00
wake_up_all ( & css - > cgroup - > offline_waitq ) ;
2012-11-19 08:13:37 -08:00
}
2013-12-06 15:11:56 -05:00
/**
2016-03-03 09:57:58 -05:00
* css_create - create a cgroup_subsys_state
2013-12-06 15:11:56 -05:00
* @cgrp: the cgroup new css will be associated with
* @ss: the subsys of new css
*
* Create a new css associated with @cgrp - @ss pair. On success, the new
2016-03-03 09:57:58 -05:00
* css is online and installed in @cgrp. This function doesn't create the
* interface files. Returns 0 on success, -errno on failure.
2013-12-06 15:11:56 -05:00
*/
2016-03-03 09:57:58 -05:00
static struct cgroup_subsys_state * css_create ( struct cgroup * cgrp ,
struct cgroup_subsys * ss )
2013-12-06 15:11:56 -05:00
{
2014-05-16 13:22:48 -04:00
struct cgroup * parent = cgroup_parent ( cgrp ) ;
2014-05-16 13:22:49 -04:00
struct cgroup_subsys_state * parent_css = cgroup_css ( parent , ss ) ;
2013-12-06 15:11:56 -05:00
struct cgroup_subsys_state * css ;
int err ;
lockdep_assert_held ( & cgroup_mutex ) ;
2014-05-16 13:22:49 -04:00
css = ss - > css_alloc ( parent_css ) ;
2016-06-21 13:06:24 -04:00
if ( ! css )
css = ERR_PTR ( - ENOMEM ) ;
2013-12-06 15:11:56 -05:00
if ( IS_ERR ( css ) )
2016-03-03 09:57:58 -05:00
return css ;
2013-12-06 15:11:56 -05:00
2014-05-04 15:09:14 -04:00
init_and_link_css ( css , ss , cgrp ) ;
2014-05-04 15:09:14 -04:00
2014-09-24 13:31:50 -04:00
err = percpu_ref_init ( & css - > refcnt , css_release , 0 , GFP_KERNEL ) ;
2013-12-06 15:11:56 -05:00
if ( err )
2014-03-18 17:02:36 +08:00
goto err_free_css ;
2013-12-06 15:11:56 -05:00
2015-08-03 15:32:26 +03:00
err = cgroup_idr_alloc ( & ss - > css_idr , NULL , 2 , 0 , GFP_KERNEL ) ;
2014-05-04 15:09:14 -04:00
if ( err < 0 )
2016-05-13 22:59:20 +08:00
goto err_free_css ;
2014-05-04 15:09:14 -04:00
css - > id = err ;
2013-12-06 15:11:56 -05:00
2014-05-04 15:09:14 -04:00
/* @css is ready to be brought online now, make it visible */
2014-05-16 13:22:49 -04:00
list_add_tail_rcu ( & css - > sibling , & parent_css - > children ) ;
2014-05-04 15:09:14 -04:00
cgroup_idr_replace ( & ss - > css_idr , css , css - > id ) ;
2013-12-06 15:11:56 -05:00
err = online_css ( css ) ;
if ( err )
2014-05-16 13:22:49 -04:00
goto err_list_del ;
2014-03-19 10:23:54 -04:00
2013-12-06 15:11:56 -05:00
if ( ss - > broken_hierarchy & & ! ss - > warned_broken_hierarchy & &
2014-05-16 13:22:48 -04:00
cgroup_parent ( parent ) ) {
2014-04-25 18:28:03 -04:00
pr_warn ( " %s (%d) created nested cgroup for controller \" %s \" which has incomplete hierarchy support. Nested cgroups may change behavior in the future. \n " ,
2014-04-25 18:28:03 -04:00
current - > comm , current - > pid , ss - > name ) ;
2013-12-06 15:11:56 -05:00
if ( ! strcmp ( ss - > name , " memory " ) )
2014-04-25 18:28:03 -04:00
pr_warn ( " \" memory \" requires setting use_hierarchy to 1 on the root \n " ) ;
2013-12-06 15:11:56 -05:00
ss - > warned_broken_hierarchy = true ;
}
2016-03-03 09:57:58 -05:00
return css ;
2013-12-06 15:11:56 -05:00
2014-05-16 13:22:49 -04:00
err_list_del :
list_del_rcu ( & css - > sibling ) ;
2014-03-18 17:02:36 +08:00
err_free_css :
2014-05-04 15:09:14 -04:00
call_rcu ( & css - > rcu_head , css_free_rcu_fn ) ;
2016-03-03 09:57:58 -05:00
return ERR_PTR ( err ) ;
2013-12-06 15:11:56 -05:00
}
2016-03-03 09:57:58 -05:00
static struct cgroup * cgroup_create ( struct cgroup * parent )
2007-10-18 23:39:30 -07:00
{
2016-03-03 09:57:58 -05:00
struct cgroup_root * root = parent - > root ;
struct cgroup * cgrp , * tcgrp ;
int level = parent - > level + 1 ;
2016-03-03 09:58:00 -05:00
int ret ;
2007-10-18 23:39:30 -07:00
2012-11-19 09:02:12 -08:00
/* allocate the cgroup and its ID, 0 is reserved for the root */
2015-11-20 15:55:52 -05:00
cgrp = kzalloc ( sizeof ( * cgrp ) +
sizeof ( cgrp - > ancestor_ids [ 0 ] ) * ( level + 1 ) , GFP_KERNEL ) ;
2016-03-03 09:57:58 -05:00
if ( ! cgrp )
return ERR_PTR ( - ENOMEM ) ;
2014-02-11 16:05:46 +08:00
2014-09-24 13:31:50 -04:00
ret = percpu_ref_init ( & cgrp - > self . refcnt , css_release , 0 , GFP_KERNEL ) ;
2014-05-14 09:15:02 -04:00
if ( ret )
goto out_free_cgrp ;
2014-02-11 16:05:46 +08:00
/*
* Temporarily set the pointer to NULL, so idr_find() won't return
* a half-baked cgroup.
*/
2015-08-03 15:32:26 +03:00
cgrp - > id = cgroup_idr_alloc ( & root - > cgroup_idr , NULL , 2 , 0 , GFP_KERNEL ) ;
2014-02-11 16:05:46 +08:00
if ( cgrp - > id < 0 ) {
2014-05-13 12:19:22 -04:00
ret = - ENOMEM ;
2014-05-14 09:15:02 -04:00
goto out_cancel_ref ;
2012-11-05 09:16:59 -08:00
}
2008-10-18 20:28:04 -07:00
init_cgroup_housekeeping ( cgrp ) ;
2007-10-18 23:39:30 -07:00
2014-05-14 09:15:00 -04:00
cgrp - > self . parent = & parent - > self ;
2014-05-13 12:19:22 -04:00
cgrp - > root = root ;
2015-11-20 15:55:52 -05:00
cgrp - > level = level ;
for ( tcgrp = cgrp ; tcgrp ; tcgrp = cgroup_parent ( tcgrp ) )
cgrp - > ancestor_ids [ tcgrp - > level ] = tcgrp - > id ;
2007-10-18 23:39:30 -07:00
2008-03-04 14:28:19 -08:00
if ( notify_on_release ( parent ) )
set_bit ( CGRP_NOTIFY_ON_RELEASE , & cgrp - > flags ) ;
2012-11-19 08:13:38 -08:00
if ( test_bit ( CGRP_CPUSET_CLONE_CHILDREN , & parent - > flags ) )
set_bit ( CGRP_CPUSET_CLONE_CHILDREN , & cgrp - > flags ) ;
2010-10-27 15:33:35 -07:00
2014-05-16 13:22:49 -04:00
cgrp - > self . serial_nr = css_serial_nr_next + + ;
2013-05-24 10:55:38 +09:00
2012-11-19 08:13:36 -08:00
/* allocation complete, commit to creation */
2014-05-16 13:22:48 -04:00
list_add_tail_rcu ( & cgrp - > self . sibling , & cgroup_parent ( cgrp ) - > self . children ) ;
2014-02-12 09:29:50 -05:00
atomic_inc ( & root - > nr_cgrps ) ;
2014-02-11 11:52:49 -05:00
cgroup_get ( parent ) ;
2013-04-08 14:35:02 +08:00
2013-12-06 15:11:56 -05:00
/*
* @cgrp is now fully operational. If something fails after this
* point, it'll be released via the normal destruction path.
*/
2014-05-04 15:09:13 -04:00
cgroup_idr_replace ( & root - > cgroup_idr , cgrp , cgrp - > id ) ;
2013-07-31 09:50:50 +08:00
2014-04-23 11:13:16 -04:00
/*
* On the default hierarchy, a child doesn't automatically inherit
2014-07-08 18:02:56 -04:00
* subtree_control from the parent. Each is configured manually.
2014-04-23 11:13:16 -04:00
*/
2016-03-03 09:58:00 -05:00
if ( ! cgroup_on_dfl ( cgrp ) )
2016-03-03 09:57:58 -05:00
cgrp - > subtree_control = cgroup_control ( cgrp ) ;
2016-03-03 09:58:00 -05:00
2016-11-23 16:52:26 +01:00
if ( parent )
cgroup_bpf_inherit ( cgrp , parent ) ;
2016-03-03 09:58:00 -05:00
cgroup_propagate_control ( cgrp ) ;
/* @cgrp doesn't have dir yet so the following will only create csses */
ret = cgroup_apply_control_enable ( cgrp ) ;
if ( ret )
goto out_destroy ;
2014-04-23 11:13:14 -04:00
2016-03-03 09:57:58 -05:00
return cgrp ;
out_cancel_ref :
percpu_ref_exit ( & cgrp - > self . refcnt ) ;
out_free_cgrp :
kfree ( cgrp ) ;
return ERR_PTR ( ret ) ;
out_destroy :
cgroup_destroy_locked ( cgrp ) ;
return ERR_PTR ( ret ) ;
}
static int cgroup_mkdir ( struct kernfs_node * parent_kn , const char * name ,
umode_t mode )
{
struct cgroup * parent , * cgrp ;
struct kernfs_node * kn ;
2016-03-03 09:58:00 -05:00
int ret ;
2016-03-03 09:57:58 -05:00
/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
if ( strchr ( name , ' \n ' ) )
return - EINVAL ;
2016-03-03 09:58:00 -05:00
parent = cgroup_kn_lock_live ( parent_kn , false ) ;
2016-03-03 09:57:58 -05:00
if ( ! parent )
return - ENODEV ;
cgrp = cgroup_create ( parent ) ;
if ( IS_ERR ( cgrp ) ) {
ret = PTR_ERR ( cgrp ) ;
goto out_unlock ;
}
2016-03-03 09:57:58 -05:00
/* create the directory */
kn = kernfs_create_dir ( parent - > kn , name , mode , cgrp ) ;
if ( IS_ERR ( kn ) ) {
ret = PTR_ERR ( kn ) ;
goto out_destroy ;
}
cgrp - > kn = kn ;
/*
* This extra ref will be put in cgroup_free_fn() and guarantees
* that @cgrp->kn is always accessible.
*/
kernfs_get ( kn ) ;
ret = cgroup_kn_set_ugid ( kn ) ;
if ( ret )
goto out_destroy ;
2016-03-03 09:58:01 -05:00
ret = css_populate_dir ( & cgrp - > self ) ;
2016-03-03 09:57:58 -05:00
if ( ret )
goto out_destroy ;
2016-03-03 09:58:00 -05:00
ret = cgroup_apply_control_enable ( cgrp ) ;
if ( ret )
goto out_destroy ;
2016-03-03 09:57:58 -05:00
2016-08-10 11:23:44 -04:00
trace_cgroup_mkdir ( cgrp ) ;
2016-03-03 09:57:58 -05:00
/* let's create and online css's */
2014-02-11 11:52:49 -05:00
kernfs_activate ( kn ) ;
2014-05-13 12:19:22 -04:00
ret = 0 ;
goto out_unlock ;
2007-10-18 23:39:30 -07:00
2016-03-03 09:57:58 -05:00
out_destroy :
cgroup_destroy_locked ( cgrp ) ;
2014-05-13 12:19:22 -04:00
out_unlock :
2014-05-13 12:19:22 -04:00
cgroup_kn_unlock ( parent_kn ) ;
2014-03-20 11:10:15 -04:00
return ret ;
2007-10-18 23:39:30 -07:00
}
2013-08-13 20:22:50 -04:00
/*
* This is called when the refcnt of a css is confirmed to be killed.
2014-05-14 09:15:01 -04:00
* css_tryget_online() is now guaranteed to fail. Tell the subsystem to
* initate destruction and put the css ref from kill_css().
2013-08-13 20:22:50 -04:00
*/
static void css_killed_work_fn ( struct work_struct * work )
2013-06-13 19:39:16 -07:00
{
2013-08-13 20:22:50 -04:00
struct cgroup_subsys_state * css =
container_of ( work , struct cgroup_subsys_state , destroy_work ) ;
2013-06-13 19:39:16 -07:00
2013-08-13 20:22:50 -04:00
mutex_lock ( & cgroup_mutex ) ;
2013-08-13 20:22:50 -04:00
2016-01-21 15:31:11 -05:00
do {
offline_css ( css ) ;
css_put ( css ) ;
/* @css can't go away while we're holding cgroup_mutex */
css = css - > parent ;
} while ( css & & atomic_dec_and_test ( & css - > online_cnt ) ) ;
mutex_unlock ( & cgroup_mutex ) ;
2013-06-13 19:39:16 -07:00
}
2013-08-13 20:22:50 -04:00
/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn ( struct percpu_ref * ref )
2013-06-13 19:39:16 -07:00
{
struct cgroup_subsys_state * css =
container_of ( ref , struct cgroup_subsys_state , refcnt ) ;
2016-01-21 15:31:11 -05:00
if ( atomic_dec_and_test ( & css - > online_cnt ) ) {
INIT_WORK ( & css - > destroy_work , css_killed_work_fn ) ;
queue_work ( cgroup_destroy_wq , & css - > destroy_work ) ;
}
2013-06-13 19:39:16 -07:00
}
2014-04-23 11:13:14 -04:00
/**
* kill_css - destroy a css
* @css: css to destroy
*
* This function initiates destruction of @css by removing cgroup interface
* files and putting its base reference. ->css_offline() will be invoked
2014-05-13 12:11:01 -04:00
* asynchronously once css_tryget_online() is guaranteed to fail and when
* the reference count reaches zero, @css will be released.
2014-04-23 11:13:14 -04:00
*/
static void kill_css ( struct cgroup_subsys_state * css )
2013-08-13 20:22:51 -04:00
{
2014-05-13 12:19:23 -04:00
lockdep_assert_held ( & cgroup_mutex ) ;
2014-03-19 10:23:54 -04:00
2014-02-11 11:52:49 -05:00
/*
* This must happen before css is disassociated with its cgroup.
* See seq_css() for details.
*/
2016-03-03 09:58:01 -05:00
css_clear_dir ( css ) ;
2013-08-13 20:22:51 -04:00
2013-08-13 20:22:51 -04:00
/*
* Killing would put the base ref, but we need to keep it alive
* until after ->css_offline().
*/
css_get ( css ) ;
/*
* cgroup core guarantees that, by the time ->css_offline() is
* invoked, no new css reference will be given out via
2014-05-13 12:11:01 -04:00
* css_tryget_online(). We can't simply call percpu_ref_kill() and
2013-08-13 20:22:51 -04:00
* proceed to offlining css's because percpu_ref_kill() doesn't
* guarantee that the ref is seen as killed on all CPUs on return.
*
* Use percpu_ref_kill_and_confirm() to get notifications as each
* css is confirmed to be seen as killed on all CPUs.
*/
percpu_ref_kill_and_confirm ( & css - > refcnt , css_killed_ref_fn ) ;
2013-06-13 19:39:16 -07:00
}
/**
* cgroup_destroy_locked - the first stage of cgroup destruction
* @cgrp: cgroup to be destroyed
*
* css's make use of percpu refcnts whose killing latency shouldn't be
* exposed to userland and are RCU protected. Also, cgroup core needs to
2014-05-13 12:11:01 -04:00
* guarantee that css_tryget_online() won't succeed by the time
* ->css_offline() is invoked. To satisfy all the requirements,
* destruction is implemented in the following two steps.
2013-06-13 19:39:16 -07:00
*
* s1. Verify @cgrp can be destroyed and mark it dying. Remove all
* userland visible parts and start killing the percpu refcnts of
* css's. Set up so that the next stage will be kicked off once all
* the percpu refcnts are confirmed to be killed.
*
* s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
* rest of destruction. Once all cgroup references are gone, the
* cgroup is RCU-freed.
*
* This function implements s1. After this step, @cgrp is gone as far as
* the userland is concerned and a new cgroup with the same name may be
* created. As cgroup doesn't care about the names internally, this
* doesn't cause any problem.
*/
2012-11-19 08:13:37 -08:00
static int cgroup_destroy_locked ( struct cgroup * cgrp )
__releases ( & cgroup_mutex ) __acquires ( & cgroup_mutex )
2007-10-18 23:39:30 -07:00
{
2014-02-11 11:52:49 -05:00
struct cgroup_subsys_state * css ;
2016-03-15 20:43:04 -04:00
struct cgrp_cset_link * link ;
2013-12-06 15:11:56 -05:00
int ssid ;
2007-10-18 23:39:30 -07:00
2012-11-19 08:13:37 -08:00
lockdep_assert_held ( & cgroup_mutex ) ;
2015-10-15 16:41:51 -04:00
/*
* Only migration can raise populated from zero and we're already
* holding cgroup_mutex.
*/
if ( cgroup_is_populated ( cgrp ) )
2007-10-18 23:39:30 -07:00
return - EBUSY ;
2012-11-05 09:16:58 -08:00
2013-08-28 16:31:23 -07:00
/*
2014-05-16 13:22:48 -04:00
* Make sure there's no live children. We can't test emptiness of
* ->self.children as dead children linger on it while being
* drained; otherwise, "rmdir parent/child parent" may fail.
2013-08-28 16:31:23 -07:00
*/
2014-05-16 13:22:52 -04:00
if ( css_has_online_children ( & cgrp - > self ) )
2013-08-28 16:31:23 -07:00
return - EBUSY ;
2013-06-13 19:27:41 -07:00
/*
2016-03-15 20:43:04 -04:00
* Mark @cgrp and the associated csets dead. The former prevents
* further task migration and child creation by disabling
* cgroup_lock_live_group(). The latter makes the csets ignored by
* the migration path.
2013-06-13 19:27:41 -07:00
*/
2014-05-16 13:22:51 -04:00
cgrp - > self . flags & = ~ CSS_ONLINE ;
2012-11-05 09:16:59 -08:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-03-15 20:43:04 -04:00
list_for_each_entry ( link , & cgrp - > cset_links , cset_link )
link - > cset - > dead = true ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2016-03-15 20:43:04 -04:00
2014-05-14 09:15:01 -04:00
/* initiate massacre of all css's */
2013-12-06 15:11:56 -05:00
for_each_css ( css , ssid , cgrp )
kill_css ( css ) ;
2013-06-13 19:27:41 -07:00
/*
2014-05-13 12:19:23 -04:00
* Remove @cgrp directory along with the base files. @cgrp has an
* extra ref on its kn.
2013-06-13 19:27:41 -07:00
*/
2014-05-13 12:19:23 -04:00
kernfs_remove ( cgrp - > kn ) ;
2013-08-13 20:22:50 -04:00
2014-05-16 13:22:48 -04:00
check_for_release ( cgroup_parent ( cgrp ) ) ;
2014-02-11 11:52:49 -05:00
2014-05-14 09:15:01 -04:00
/* put the base reference */
2014-05-14 09:15:02 -04:00
percpu_ref_kill ( & cgrp - > self . refcnt ) ;
2013-06-13 19:27:41 -07:00
2013-06-13 19:27:42 -07:00
return 0 ;
} ;
2014-02-11 11:52:49 -05:00
static int cgroup_rmdir ( struct kernfs_node * kn )
2012-11-19 08:13:37 -08:00
{
2014-05-13 12:19:22 -04:00
struct cgroup * cgrp ;
2014-02-11 11:52:49 -05:00
int ret = 0 ;
2012-11-19 08:13:37 -08:00
2016-03-03 09:58:00 -05:00
cgrp = cgroup_kn_lock_live ( kn , false ) ;
2014-05-13 12:19:22 -04:00
if ( ! cgrp )
return 0 ;
2012-11-19 08:13:37 -08:00
2014-05-13 12:19:22 -04:00
ret = cgroup_destroy_locked ( cgrp ) ;
2012-11-19 08:13:37 -08:00
2016-08-10 11:23:44 -04:00
if ( ! ret )
trace_cgroup_rmdir ( cgrp ) ;
2014-05-13 12:19:22 -04:00
cgroup_kn_unlock ( kn ) ;
2012-11-19 08:13:37 -08:00
return ret ;
}
2014-02-11 11:52:49 -05:00
static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
. remount_fs = cgroup_remount ,
. show_options = cgroup_show_options ,
. mkdir = cgroup_mkdir ,
. rmdir = cgroup_rmdir ,
. rename = cgroup_rename ,
2016-05-09 09:59:55 -05:00
. show_path = cgroup_show_path ,
2014-02-11 11:52:49 -05:00
} ;
2012-04-01 12:09:55 -07:00
2014-05-04 15:09:14 -04:00
static void __init cgroup_init_subsys ( struct cgroup_subsys * ss , bool early )
2007-10-18 23:39:30 -07:00
{
struct cgroup_subsys_state * css ;
2007-11-14 16:58:54 -08:00
2015-12-29 14:53:56 -05:00
pr_debug ( " Initializing cgroup subsys %s \n " , ss - > name ) ;
2007-10-18 23:39:30 -07:00
2012-11-19 08:13:36 -08:00
mutex_lock ( & cgroup_mutex ) ;
2014-05-04 15:09:14 -04:00
idr_init ( & ss - > css_idr ) ;
2014-02-12 09:29:48 -05:00
INIT_LIST_HEAD ( & ss - > cfts ) ;
2012-04-01 12:09:55 -07:00
2014-03-19 10:23:54 -04:00
/* Create the root cgroup state for this subsystem */
ss - > root = & cgrp_dfl_root ;
css = ss - > css_alloc ( cgroup_css ( & cgrp_dfl_root . cgrp , ss ) ) ;
2007-10-18 23:39:30 -07:00
/* We don't handle early failures gracefully */
BUG_ON ( IS_ERR ( css ) ) ;
2014-05-04 15:09:14 -04:00
init_and_link_css ( css , ss , & cgrp_dfl_root . cgrp ) ;
2014-05-16 13:22:47 -04:00
/*
* Root csses are never destroyed and we can't initialize
* percpu_ref during early init. Disable refcnting.
*/
css - > flags | = CSS_NO_REF ;
2014-05-04 15:09:14 -04:00
if ( early ) {
2014-05-14 09:15:02 -04:00
/* allocation can't be done safely during early init */
2014-05-04 15:09:14 -04:00
css - > id = 1 ;
} else {
css - > id = cgroup_idr_alloc ( & ss - > css_idr , css , 1 , 2 , GFP_KERNEL ) ;
BUG_ON ( css - > id < 0 ) ;
}
2007-10-18 23:39:30 -07:00
2008-04-29 01:00:13 -07:00
/* Update the init_css_set to contain a subsys
2007-10-18 23:39:36 -07:00
* pointer to this state - since the subsystem is
2008-04-29 01:00:13 -07:00
* newly registered, all tasks and hence the
2014-03-19 10:23:54 -04:00
* init_css_set is in the subsystem's root cgroup. */
2014-02-08 10:36:58 -05:00
init_css_set . subsys [ ss - > id ] = css ;
2007-10-18 23:39:30 -07:00
2015-06-06 10:02:14 +10:00
have_fork_callback | = ( bool ) ss - > fork < < ss - > id ;
have_exit_callback | = ( bool ) ss - > exit < < ss - > id ;
2015-10-15 16:41:53 -04:00
have_free_callback | = ( bool ) ss - > free < < ss - > id ;
2015-06-09 21:32:09 +10:00
have_canfork_callback | = ( bool ) ss - > can_fork < < ss - > id ;
2007-10-18 23:39:30 -07:00
2008-04-29 01:00:13 -07:00
/* At system boot, before all subsystems have been
* registered, no tasks have been forked, so we don't
* need to invoke fork callbacks here. */
BUG_ON ( ! list_empty ( & init_task . tasks ) ) ;
2013-08-13 20:22:50 -04:00
BUG_ON ( online_css ( css ) ) ;
2012-11-09 09:12:29 -08:00
2012-11-19 08:13:36 -08:00
mutex_unlock ( & cgroup_mutex ) ;
2007-10-18 23:39:30 -07:00
}
/**
2008-02-23 15:24:09 -08:00
* cgroup_init_early - cgroup initialization at system boot
*
* Initialize cgroups at system boot, and initialize any
* subsystems that request early init.
2007-10-18 23:39:30 -07:00
*/
int __init cgroup_init_early ( void )
{
2014-07-09 10:08:08 -04:00
static struct cgroup_sb_opts __initdata opts ;
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2007-10-18 23:39:30 -07:00
int i ;
2013-06-25 11:53:37 -07:00
2014-03-19 10:23:54 -04:00
init_cgroup_root ( & cgrp_dfl_root , & opts ) ;
2014-05-16 13:22:47 -04:00
cgrp_dfl_root . cgrp . self . flags | = CSS_NO_REF ;
2013-06-21 15:52:33 -07:00
RCU_INIT_POINTER ( init_task . cgroups , & init_css_set ) ;
2007-10-18 23:39:36 -07:00
2014-02-08 10:36:58 -05:00
for_each_subsys ( ss , i ) {
2014-02-08 10:36:58 -05:00
WARN ( ! ss - > css_alloc | | ! ss - > css_free | | ss - > name | | ss - > id ,
2016-02-26 13:07:38 +08:00
" invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s \n " ,
2014-02-08 10:36:58 -05:00
i , cgroup_subsys_name [ i ] , ss - > css_alloc , ss - > css_free ,
2014-02-08 10:36:58 -05:00
ss - > id , ss - > name ) ;
2014-02-08 10:36:58 -05:00
WARN ( strlen ( cgroup_subsys_name [ i ] ) > MAX_CGROUP_TYPE_NAMELEN ,
" cgroup_subsys_name %s too long \n " , cgroup_subsys_name [ i ] ) ;
2007-10-18 23:39:30 -07:00
2014-02-08 10:36:58 -05:00
ss - > id = i ;
2014-02-08 10:36:58 -05:00
ss - > name = cgroup_subsys_name [ i ] ;
2015-08-18 13:58:16 -07:00
if ( ! ss - > legacy_name )
ss - > legacy_name = cgroup_subsys_name [ i ] ;
2007-10-18 23:39:30 -07:00
if ( ss - > early_init )
2014-05-04 15:09:14 -04:00
cgroup_init_subsys ( ss , true ) ;
2007-10-18 23:39:30 -07:00
}
return 0 ;
}
2016-02-22 22:25:47 -05:00
static u16 cgroup_disable_mask __initdata ;
2015-09-25 16:24:27 -04:00
2007-10-18 23:39:30 -07:00
/**
2008-02-23 15:24:09 -08:00
* cgroup_init - cgroup initialization
*
* Register cgroup filesystem and /proc file, and initialize
* any subsystems that didn't request early init.
2007-10-18 23:39:30 -07:00
*/
int __init cgroup_init ( void )
{
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2015-10-15 17:00:43 -04:00
int ssid ;
2007-10-18 23:39:35 -07:00
2016-02-22 22:25:47 -05:00
BUILD_BUG_ON ( CGROUP_SUBSYS_COUNT > 16 ) ;
2015-09-16 12:53:17 -04:00
BUG_ON ( percpu_init_rwsem ( & cgroup_threadgroup_rwsem ) ) ;
2014-07-15 11:05:09 -04:00
BUG_ON ( cgroup_init_cftypes ( NULL , cgroup_dfl_base_files ) ) ;
BUG_ON ( cgroup_init_cftypes ( NULL , cgroup_legacy_base_files ) ) ;
2007-10-18 23:39:30 -07:00
2016-08-11 18:54:13 +02:00
/*
* The latency of the synchronize_sched() is too high for cgroups,
* avoid it at the cost of forcing all readers into the slow path.
*/
rcu_sync_enter_start ( & cgroup_threadgroup_rwsem . rss ) ;
2016-01-29 02:54:06 -06:00
get_user_ns ( init_cgroup_ns . user_ns ) ;
2013-04-14 11:36:57 -07:00
mutex_lock ( & cgroup_mutex ) ;
2016-03-03 09:57:57 -05:00
/*
* Add init_css_set to the hash table so that dfl_root can link to
* it during init.
*/
hash_add ( css_set_table , & init_css_set . hlist ,
css_set_hash ( init_css_set . subsys ) ) ;
2013-06-25 11:53:37 -07:00
2014-03-19 10:23:54 -04:00
BUG_ON ( cgroup_setup_root ( & cgrp_dfl_root , 0 ) ) ;
2010-08-05 13:53:35 -07:00
2013-04-14 11:36:57 -07:00
mutex_unlock ( & cgroup_mutex ) ;
2014-03-19 10:23:53 -04:00
for_each_subsys ( ss , ssid ) {
2014-05-04 15:09:14 -04:00
if ( ss - > early_init ) {
struct cgroup_subsys_state * css =
init_css_set . subsys [ ss - > id ] ;
css - > id = cgroup_idr_alloc ( & ss - > css_idr , css , 1 , 2 ,
GFP_KERNEL ) ;
BUG_ON ( css - > id < 0 ) ;
} else {
cgroup_init_subsys ( ss , false ) ;
}
2014-03-19 10:23:53 -04:00
2014-04-23 11:13:15 -04:00
list_add_tail ( & init_css_set . e_cset_node [ ssid ] ,
& cgrp_dfl_root . cgrp . e_csets [ ssid ] ) ;
2014-03-19 10:23:53 -04:00
/*
2014-06-05 17:16:30 +08:00
* Setting dfl_root subsys_mask needs to consider the
* disabled flag and cftype registration needs kmalloc,
* both of which aren't available during early_init.
2014-03-19 10:23:53 -04:00
*/
2015-09-25 16:24:27 -04:00
if ( cgroup_disable_mask & ( 1 < < ssid ) ) {
static_branch_disable ( cgroup_subsys_enabled_key [ ssid ] ) ;
printk ( KERN_INFO " Disabling %s control group subsystem \n " ,
ss - > name ) ;
2014-07-15 11:05:10 -04:00
continue ;
2015-09-25 16:24:27 -04:00
}
2014-07-15 11:05:10 -04:00
2016-02-11 13:34:49 -05:00
if ( cgroup_ssid_no_v1 ( ssid ) )
printk ( KERN_INFO " Disabling %s control group subsystem in v1 mounts \n " ,
ss - > name ) ;
2014-07-15 11:05:10 -04:00
cgrp_dfl_root . subsys_mask | = 1 < < ss - > id ;
2016-03-08 11:51:26 -05:00
if ( ss - > implicit_on_dfl )
cgrp_dfl_implicit_ss_mask | = 1 < < ss - > id ;
else if ( ! ss - > dfl_cftypes )
2016-02-23 10:00:50 -05:00
cgrp_dfl_inhibit_ss_mask | = 1 < < ss - > id ;
2014-07-15 11:05:10 -04:00
2014-07-15 11:05:10 -04:00
if ( ss - > dfl_cftypes = = ss - > legacy_cftypes ) {
WARN_ON ( cgroup_add_cftypes ( ss , ss - > dfl_cftypes ) ) ;
} else {
WARN_ON ( cgroup_add_dfl_cftypes ( ss , ss - > dfl_cftypes ) ) ;
WARN_ON ( cgroup_add_legacy_cftypes ( ss , ss - > legacy_cftypes ) ) ;
2014-06-05 17:16:30 +08:00
}
2015-02-19 17:34:46 +03:00
if ( ss - > bind )
ss - > bind ( init_css_set . subsys [ ssid ] ) ;
2014-03-19 10:23:53 -04:00
}
2013-04-14 11:36:57 -07:00
2016-03-03 09:57:57 -05:00
/* init_css_set.subsys[] has been updated, re-hash */
hash_del ( & init_css_set . hlist ) ;
hash_add ( css_set_table , & init_css_set . hlist ,
css_set_hash ( init_css_set . subsys ) ) ;
2015-10-15 17:00:43 -04:00
WARN_ON ( sysfs_create_mount_point ( fs_kobj , " cgroup " ) ) ;
WARN_ON ( register_filesystem ( & cgroup_fs_type ) ) ;
2015-11-16 11:13:34 -05:00
WARN_ON ( register_filesystem ( & cgroup2_fs_type ) ) ;
2015-10-15 17:00:43 -04:00
WARN_ON ( ! proc_create ( " cgroups " , 0 , NULL , & proc_cgroupstats_operations ) ) ;
2010-08-05 13:53:35 -07:00
2014-02-11 11:52:49 -05:00
return 0 ;
2007-10-18 23:39:30 -07:00
}
2007-10-18 23:39:33 -07:00
2013-11-22 17:14:39 -05:00
static int __init cgroup_wq_init ( void )
{
/*
* There isn't much point in executing destruction path in
* parallel. Good chunk is serialized with cgroup_mutex anyway.
2014-02-12 19:06:19 -05:00
* Use 1 for @max_active.
2013-11-22 17:14:39 -05:00
*
* We would prefer to do this in cgroup_init() above, but that
* is called before init_workqueues(): so leave this until after.
*/
2014-02-12 19:06:19 -05:00
cgroup_destroy_wq = alloc_workqueue ( " cgroup_destroy " , 0 , 1 ) ;
2013-11-22 17:14:39 -05:00
BUG_ON ( ! cgroup_destroy_wq ) ;
2013-11-29 10:42:58 -05:00
/*
* Used to destroy pidlists and separate to serve as flush domain.
* Cap @max_active to 1 too.
*/
cgroup_pidlist_destroy_wq = alloc_workqueue ( " cgroup_pidlist_destroy " ,
0 , 1 ) ;
BUG_ON ( ! cgroup_pidlist_destroy_wq ) ;
2013-11-22 17:14:39 -05:00
return 0 ;
}
core_initcall ( cgroup_wq_init ) ;
2007-10-18 23:39:35 -07:00
/*
* proc_cgroup_show()
* - Print task's cgroup paths into seq_file, one line for each hierarchy
* - Used for /proc/<pid>/cgroup.
*/
2014-09-18 16:03:15 +08:00
int proc_cgroup_show ( struct seq_file * m , struct pid_namespace * ns ,
struct pid * pid , struct task_struct * tsk )
2007-10-18 23:39:35 -07:00
{
2016-08-10 11:23:44 -04:00
char * buf ;
2007-10-18 23:39:35 -07:00
int retval ;
2014-03-19 10:23:54 -04:00
struct cgroup_root * root ;
2007-10-18 23:39:35 -07:00
retval = - ENOMEM ;
2014-02-12 09:29:50 -05:00
buf = kmalloc ( PATH_MAX , GFP_KERNEL ) ;
2007-10-18 23:39:35 -07:00
if ( ! buf )
goto out ;
mutex_lock ( & cgroup_mutex ) ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2007-10-18 23:39:35 -07:00
2014-03-19 10:23:53 -04:00
for_each_root ( root ) {
2007-10-18 23:39:35 -07:00
struct cgroup_subsys * ss ;
2007-10-18 23:40:44 -07:00
struct cgroup * cgrp ;
2013-12-06 15:11:57 -05:00
int ssid , count = 0 ;
2007-10-18 23:39:35 -07:00
2016-02-23 10:00:50 -05:00
if ( root = = & cgrp_dfl_root & & ! cgrp_dfl_visible )
2014-03-19 10:23:53 -04:00
continue ;
2009-09-23 15:56:23 -07:00
seq_printf ( m , " %d: " , root - > hierarchy_id ) ;
2015-08-18 13:58:16 -07:00
if ( root ! = & cgrp_dfl_root )
for_each_subsys ( ss , ssid )
if ( root - > subsys_mask & ( 1 < < ssid ) )
seq_printf ( m , " %s%s " , count + + ? " , " : " " ,
2015-08-18 13:58:16 -07:00
ss - > legacy_name ) ;
2009-09-23 15:56:19 -07:00
if ( strlen ( root - > name ) )
seq_printf ( m , " %sname=%s " , count ? " , " : " " ,
root - > name ) ;
2007-10-18 23:39:35 -07:00
seq_putc ( m , ' : ' ) ;
2015-10-15 16:41:53 -04:00
2009-09-23 15:56:22 -07:00
cgrp = task_cgroup_from_root ( tsk , root ) ;
2015-10-15 16:41:53 -04:00
/*
* On traditional hierarchies, all zombie tasks show up as
* belonging to the root cgroup. On the default hierarchy,
* while a zombie doesn't show up in "cgroup.procs" and
* thus can't be migrated, its /proc/PID/cgroup keeps
* reporting the cgroup it belonged to before exiting. If
* the cgroup is removed before the zombie is reaped,
* " (deleted)" is appended to the cgroup path.
*/
if ( cgroup_on_dfl ( cgrp ) | | ! ( tsk - > flags & PF_EXITING ) ) {
2016-08-10 11:23:44 -04:00
retval = cgroup_path_ns_locked ( cgrp , buf , PATH_MAX ,
2016-01-29 02:54:06 -06:00
current - > nsproxy - > cgroup_ns ) ;
2016-09-29 15:49:40 +02:00
if ( retval > = PATH_MAX )
2015-10-15 16:41:53 -04:00
retval = - ENAMETOOLONG ;
2016-09-29 15:49:40 +02:00
if ( retval < 0 )
2015-10-15 16:41:53 -04:00
goto out_unlock ;
2016-08-10 11:23:44 -04:00
seq_puts ( m , buf ) ;
} else {
seq_puts ( m , " / " ) ;
}
2015-10-15 16:41:53 -04:00
if ( cgroup_on_dfl ( cgrp ) & & cgroup_is_dead ( cgrp ) )
seq_puts ( m , " (deleted) \n " ) ;
else
seq_putc ( m , ' \n ' ) ;
2007-10-18 23:39:35 -07:00
}
2014-09-18 16:03:15 +08:00
retval = 0 ;
2007-10-18 23:39:35 -07:00
out_unlock :
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:35 -07:00
mutex_unlock ( & cgroup_mutex ) ;
kfree ( buf ) ;
out :
return retval ;
}
/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show ( struct seq_file * m , void * v )
{
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2007-10-18 23:39:35 -07:00
int i ;
2008-04-04 14:29:57 -07:00
seq_puts ( m , " #subsys_name \t hierarchy \t num_cgroups \t enabled \n " ) ;
2010-03-10 15:22:07 -08:00
/*
* ideally we don't want subsystems moving around while we do this.
* cgroup_mutex is also necessary to guarantee an atomic snapshot of
* subsys/hierarchy state.
*/
2007-10-18 23:39:35 -07:00
mutex_lock ( & cgroup_mutex ) ;
2013-06-25 11:53:37 -07:00
for_each_subsys ( ss , i )
2009-09-23 15:56:23 -07:00
seq_printf ( m , " %s \t %d \t %d \t %d \n " ,
2015-08-18 13:58:16 -07:00
ss - > legacy_name , ss - > root - > hierarchy_id ,
2015-09-18 11:56:28 -04:00
atomic_read ( & ss - > root - > nr_cgrps ) ,
cgroup_ssid_enabled ( i ) ) ;
2013-06-25 11:53:37 -07:00
2007-10-18 23:39:35 -07:00
mutex_unlock ( & cgroup_mutex ) ;
return 0 ;
}
static int cgroupstats_open ( struct inode * inode , struct file * file )
{
2008-03-29 03:07:28 +00:00
return single_open ( file , proc_cgroupstats_show , NULL ) ;
2007-10-18 23:39:35 -07:00
}
2009-10-01 15:43:56 -07:00
static const struct file_operations proc_cgroupstats_operations = {
2007-10-18 23:39:35 -07:00
. open = cgroupstats_open ,
. read = seq_read ,
. llseek = seq_lseek ,
. release = single_release ,
} ;
2007-10-18 23:39:33 -07:00
/**
2014-02-25 10:04:03 -05:00
* cgroup_fork - initialize cgroup related fields during copy_process()
2008-02-23 15:24:09 -08:00
* @child: pointer to task_struct of forking parent process.
2007-10-18 23:39:33 -07:00
*
2014-02-25 10:04:03 -05:00
* A task is associated with the init_css_set until cgroup_post_fork()
* attaches it to the parent's css_set. Empty cg_list indicates that
* @child isn't holding reference to its css_set.
2007-10-18 23:39:33 -07:00
*/
void cgroup_fork ( struct task_struct * child )
{
2014-02-25 10:04:03 -05:00
RCU_INIT_POINTER ( child - > cgroups , & init_css_set ) ;
2007-10-18 23:39:36 -07:00
INIT_LIST_HEAD ( & child - > cg_list ) ;
2007-10-18 23:39:33 -07:00
}
2015-06-09 21:32:09 +10:00
/**
* cgroup_can_fork - called on a new task before the process is exposed
* @child: the task in question.
*
* This calls the subsystem can_fork() callbacks. If the can_fork() callback
* returns an error, the fork aborts with that error code. This allows for
* a cgroup subsystem to conditionally allow or deny new forks.
*/
2015-12-03 10:24:08 -05:00
int cgroup_can_fork ( struct task_struct * child )
2015-06-09 21:32:09 +10:00
{
struct cgroup_subsys * ss ;
int i , j , ret ;
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , i , have_canfork_callback ) {
2015-12-03 10:24:08 -05:00
ret = ss - > can_fork ( child ) ;
2015-06-09 21:32:09 +10:00
if ( ret )
goto out_revert ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2015-06-09 21:32:09 +10:00
return 0 ;
out_revert :
for_each_subsys ( ss , j ) {
if ( j > = i )
break ;
if ( ss - > cancel_fork )
2015-12-03 10:24:08 -05:00
ss - > cancel_fork ( child ) ;
2015-06-09 21:32:09 +10:00
}
return ret ;
}
/**
* cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
* @child: the task in question
*
* This calls the cancel_fork() callbacks if a fork failed *after*
* cgroup_can_fork() succeded.
*/
2015-12-03 10:24:08 -05:00
void cgroup_cancel_fork ( struct task_struct * child )
2015-06-09 21:32:09 +10:00
{
struct cgroup_subsys * ss ;
int i ;
for_each_subsys ( ss , i )
if ( ss - > cancel_fork )
2015-12-03 10:24:08 -05:00
ss - > cancel_fork ( child ) ;
2015-06-09 21:32:09 +10:00
}
2007-10-18 23:39:36 -07:00
/**
2008-02-23 15:24:09 -08:00
* cgroup_post_fork - called on a new task after adding it to the task list
* @child: the task in question
*
2012-10-16 15:03:14 -07:00
* Adds the task to the list running through its css_set if necessary and
* call the subsystem fork() callbacks. Has to be after the task is
* visible on the task list in case we race with the first call to
2013-08-08 20:11:26 -04:00
* cgroup_task_iter_start() - to guarantee that the new task ends up on its
2012-10-16 15:03:14 -07:00
* list.
2008-02-23 15:24:09 -08:00
*/
2015-12-03 10:24:08 -05:00
void cgroup_post_fork ( struct task_struct * child )
2007-10-18 23:39:36 -07:00
{
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2012-10-16 15:03:14 -07:00
int i ;
2012-02-08 03:37:27 +01:00
/*
2014-08-25 19:27:52 +08:00
* This may race against cgroup_enable_task_cg_lists(). As that
2014-02-25 10:04:03 -05:00
* function sets use_task_css_set_links before grabbing
* tasklist_lock and we just went through tasklist_lock to add
* @child, it's guaranteed that either we see the set
* use_task_css_set_links or cgroup_enable_task_cg_lists() sees
* @child during its iteration.
*
* If we won the race, @child is associated with %current's
2015-10-15 16:41:53 -04:00
* css_set. Grabbing css_set_lock guarantees both that the
2014-02-25 10:04:03 -05:00
* association is stable, and, on completion of the parent's
* migration, @child is visible in the source of migration or
* already in the destination cgroup. This guarantee is necessary
* when implementing operations which need to migrate all tasks of
* a cgroup to another.
*
2014-08-25 19:27:52 +08:00
* Note that if we lose to cgroup_enable_task_cg_lists(), @child
2014-02-25 10:04:03 -05:00
* will remain in init_css_set. This is safe because all tasks are
* in the init_css_set before cg_links is enabled and there's no
* operation which transfers all tasks out of init_css_set.
2012-02-08 03:37:27 +01:00
*/
2007-10-18 23:39:36 -07:00
if ( use_task_css_set_links ) {
2014-02-25 10:04:03 -05:00
struct css_set * cset ;
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2014-02-25 10:04:03 -05:00
cset = task_css_set ( current ) ;
2014-02-25 10:04:03 -05:00
if ( list_empty ( & child - > cg_list ) ) {
get_css_set ( cset ) ;
2015-10-15 16:41:52 -04:00
css_set_move_task ( child , NULL , cset , false ) ;
2014-02-25 10:04:03 -05:00
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2007-10-18 23:39:36 -07:00
}
2012-10-16 15:03:14 -07:00
/*
* Call ss->fork(). This must happen after @child is linked on
* css_set; otherwise, @child might change state between ->fork()
* and addition to css_set.
*/
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , i , have_fork_callback ) {
2015-12-03 10:24:08 -05:00
ss - > fork ( child ) ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2007-10-18 23:39:36 -07:00
}
2012-10-16 15:03:14 -07:00
2007-10-18 23:39:33 -07:00
/**
* cgroup_exit - detach cgroup from exiting task
* @tsk: pointer to task_struct of exiting process
*
* Description: Detach cgroup from @tsk and release it.
*
* Note that cgroups marked notify_on_release force every task in
* them to take the global cgroup_mutex mutex when exiting.
* This could impact scaling on very large systems. Be reluctant to
* use notify_on_release cgroups where very high task exit scaling
* is required on large systems.
*
2014-02-25 10:04:03 -05:00
* We set the exiting tasks cgroup to the root cgroup (top_cgroup). We
* call cgroup_exit() while the task is still competent to handle
* notify_on_release(), then leave the task attached to the root cgroup in
* each hierarchy for the remainder of its exit. No need to bother with
* init_css_set refcnting. init_css_set never goes away and we can't race
2014-03-28 15:18:27 +08:00
* with migration path - PF_EXITING is visible to migration path.
2007-10-18 23:39:33 -07:00
*/
2014-03-28 15:22:19 +08:00
void cgroup_exit ( struct task_struct * tsk )
2007-10-18 23:39:33 -07:00
{
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2013-06-12 21:04:49 -07:00
struct css_set * cset ;
2011-02-07 17:02:20 +01:00
int i ;
2007-10-18 23:39:36 -07:00
/*
2014-02-25 10:04:03 -05:00
* Unlink from @tsk from its css_set. As migration path can't race
2015-10-15 16:41:49 -04:00
* with us, we can check css_set and cg_list without synchronization.
2007-10-18 23:39:36 -07:00
*/
2015-10-15 16:41:49 -04:00
cset = task_css_set ( tsk ) ;
2007-10-18 23:39:36 -07:00
if ( ! list_empty ( & tsk - > cg_list ) ) {
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2015-10-15 16:41:52 -04:00
css_set_move_task ( tsk , cset , NULL , false ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2015-10-15 16:41:53 -04:00
} else {
get_css_set ( cset ) ;
2007-10-18 23:39:36 -07:00
}
2015-06-06 10:02:14 +10:00
/* see cgroup_post_fork() for details */
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , i , have_exit_callback ) {
2015-10-15 16:41:53 -04:00
ss - > exit ( tsk ) ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2015-10-15 16:41:53 -04:00
}
2013-06-25 11:53:37 -07:00
2015-10-15 16:41:53 -04:00
void cgroup_free ( struct task_struct * task )
{
struct css_set * cset = task_css_set ( task ) ;
2015-10-15 16:41:53 -04:00
struct cgroup_subsys * ss ;
int ssid ;
2016-02-22 22:25:46 -05:00
do_each_subsys_mask ( ss , ssid , have_free_callback ) {
2015-10-15 16:41:53 -04:00
ss - > free ( task ) ;
2016-02-22 22:25:46 -05:00
} while_each_subsys_mask ( ) ;
2011-02-07 17:02:20 +01:00
2015-10-15 16:41:53 -04:00
put_css_set ( cset ) ;
2007-10-18 23:39:33 -07:00
}
2007-10-18 23:39:34 -07:00
2007-10-18 23:40:44 -07:00
static void check_for_release ( struct cgroup * cgrp )
2007-10-18 23:39:38 -07:00
{
2015-10-15 16:41:50 -04:00
if ( notify_on_release ( cgrp ) & & ! cgroup_is_populated ( cgrp ) & &
2014-09-18 16:06:19 +08:00
! css_has_online_children ( & cgrp - > self ) & & ! cgroup_is_dead ( cgrp ) )
schedule_work ( & cgrp - > release_agent_work ) ;
2007-10-18 23:39:38 -07:00
}
/*
* Notify userspace when a cgroup is released, by running the
* configured release agent with the name of the cgroup (path
* relative to the root of cgroup file system) as the argument.
*
* Most likely, this user command will try to rmdir this cgroup.
*
* This races with the possibility that some other task will be
* attached to this cgroup before it is removed, or that some other
* user task will 'mkdir' a child cgroup of this cgroup. That's ok.
* The presumed 'rmdir' will fail quietly if this cgroup is no longer
* unused, and this cgroup will be reprieved from its death sentence,
* to continue to serve a useful existence. Next time it's released,
* we will get notified again, if it still has 'notify_on_release' set.
*
* The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
* means only wait until the task is successfully execve()'d. The
* separate release agent task is forked by call_usermodehelper(),
* then control in this thread returns here, without waiting for the
* release agent task. We don't bother to wait because the caller of
* this routine has no use for the exit status of the release agent
* task, so no sense holding our caller up for that.
*/
static void cgroup_release_agent ( struct work_struct * work )
{
2014-09-18 16:06:19 +08:00
struct cgroup * cgrp =
container_of ( work , struct cgroup , release_agent_work ) ;
2016-08-10 11:23:44 -04:00
char * pathbuf = NULL , * agentbuf = NULL ;
2014-09-18 16:06:19 +08:00
char * argv [ 3 ] , * envp [ 3 ] ;
2016-08-10 11:23:44 -04:00
int ret ;
2014-09-18 16:06:19 +08:00
2007-10-18 23:39:38 -07:00
mutex_lock ( & cgroup_mutex ) ;
2014-09-18 16:06:19 +08:00
pathbuf = kmalloc ( PATH_MAX , GFP_KERNEL ) ;
agentbuf = kstrdup ( cgrp - > root - > release_agent_path , GFP_KERNEL ) ;
if ( ! pathbuf | | ! agentbuf )
goto out ;
2007-10-18 23:39:38 -07:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-08-10 11:23:44 -04:00
ret = cgroup_path_ns_locked ( cgrp , pathbuf , PATH_MAX , & init_cgroup_ns ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2016-09-29 15:49:40 +02:00
if ( ret < 0 | | ret > = PATH_MAX )
2014-09-18 16:06:19 +08:00
goto out ;
argv [ 0 ] = agentbuf ;
2016-08-10 11:23:44 -04:00
argv [ 1 ] = pathbuf ;
2014-09-18 16:06:19 +08:00
argv [ 2 ] = NULL ;
/* minimal command environment */
envp [ 0 ] = " HOME=/ " ;
envp [ 1 ] = " PATH=/sbin:/bin:/usr/sbin:/usr/bin " ;
envp [ 2 ] = NULL ;
2007-10-18 23:39:38 -07:00
mutex_unlock ( & cgroup_mutex ) ;
2014-09-18 16:06:19 +08:00
call_usermodehelper ( argv [ 0 ] , argv , envp , UMH_WAIT_EXEC ) ;
2014-09-20 14:35:43 +08:00
goto out_free ;
2014-09-18 16:06:19 +08:00
out :
2014-09-20 14:35:43 +08:00
mutex_unlock ( & cgroup_mutex ) ;
out_free :
2014-09-18 16:06:19 +08:00
kfree ( agentbuf ) ;
kfree ( pathbuf ) ;
2007-10-18 23:39:38 -07:00
}
2008-04-04 14:29:57 -07:00
static int __init cgroup_disable ( char * str )
{
2013-06-25 11:53:37 -07:00
struct cgroup_subsys * ss ;
2008-04-04 14:29:57 -07:00
char * token ;
2013-06-25 11:53:37 -07:00
int i ;
2008-04-04 14:29:57 -07:00
while ( ( token = strsep ( & str , " , " ) ) ! = NULL ) {
if ( ! * token )
continue ;
2012-09-13 09:50:55 +02:00
2014-02-08 10:36:58 -05:00
for_each_subsys ( ss , i ) {
2015-08-18 13:58:16 -07:00
if ( strcmp ( token , ss - > name ) & &
strcmp ( token , ss - > legacy_name ) )
continue ;
2015-09-25 16:24:27 -04:00
cgroup_disable_mask | = 1 < < i ;
2008-04-04 14:29:57 -07:00
}
}
return 1 ;
}
__setup ( " cgroup_disable= " , cgroup_disable ) ;
2009-04-02 16:57:25 -07:00
2016-02-11 13:34:49 -05:00
static int __init cgroup_no_v1 ( char * str )
{
struct cgroup_subsys * ss ;
char * token ;
int i ;
while ( ( token = strsep ( & str , " , " ) ) ! = NULL ) {
if ( ! * token )
continue ;
if ( ! strcmp ( token , " all " ) ) {
2016-02-22 22:25:47 -05:00
cgroup_no_v1_mask = U16_MAX ;
2016-02-11 13:34:49 -05:00
break ;
}
for_each_subsys ( ss , i ) {
if ( strcmp ( token , ss - > name ) & &
strcmp ( token , ss - > legacy_name ) )
continue ;
cgroup_no_v1_mask | = 1 < < i ;
}
}
return 1 ;
}
__setup ( " cgroup_no_v1= " , cgroup_no_v1 ) ;
2013-08-13 11:01:54 -04:00
/**
2014-05-13 12:11:01 -04:00
* css_tryget_online_from_dir - get corresponding css from a cgroup dentry
2013-08-26 18:40:56 -04:00
* @dentry: directory dentry of interest
* @ss: subsystem of interest
2013-08-13 11:01:54 -04:00
*
2014-02-11 11:52:47 -05:00
* If @dentry is a directory for a cgroup which has @ss enabled on it, try
* to get the corresponding css and return it. If such css doesn't exist
* or can't be pinned, an ERR_PTR value is returned.
2011-02-14 11:20:01 +02:00
*/
2014-05-13 12:11:01 -04:00
struct cgroup_subsys_state * css_tryget_online_from_dir ( struct dentry * dentry ,
struct cgroup_subsys * ss )
2011-02-14 11:20:01 +02:00
{
2014-02-11 11:52:49 -05:00
struct kernfs_node * kn = kernfs_node_from_dentry ( dentry ) ;
2016-02-23 10:00:51 -05:00
struct file_system_type * s_type = dentry - > d_sb - > s_type ;
2014-02-11 11:52:49 -05:00
struct cgroup_subsys_state * css = NULL ;
2011-02-14 11:20:01 +02:00
struct cgroup * cgrp ;
2013-08-26 18:40:56 -04:00
/* is @dentry a cgroup dir? */
2016-02-23 10:00:51 -05:00
if ( ( s_type ! = & cgroup_fs_type & & s_type ! = & cgroup2_fs_type ) | |
! kn | | kernfs_type ( kn ) ! = KERNFS_DIR )
2011-02-14 11:20:01 +02:00
return ERR_PTR ( - EBADF ) ;
2014-02-11 11:52:47 -05:00
rcu_read_lock ( ) ;
2014-02-11 11:52:49 -05:00
/*
* This path doesn't originate from kernfs and @kn could already
* have been or be removed at any point. @kn->priv is RCU
2014-09-04 14:43:07 +08:00
* protected for this access. See css_release_work_fn() for details.
2014-02-11 11:52:49 -05:00
*/
cgrp = rcu_dereference ( kn - > priv ) ;
if ( cgrp )
css = cgroup_css ( cgrp , ss ) ;
2014-02-11 11:52:47 -05:00
2014-05-13 12:11:01 -04:00
if ( ! css | | ! css_tryget_online ( css ) )
2014-02-11 11:52:47 -05:00
css = ERR_PTR ( - ENOENT ) ;
rcu_read_unlock ( ) ;
return css ;
2011-02-14 11:20:01 +02:00
}
2013-08-19 10:05:24 +08:00
/**
* css_from_id - lookup css by id
* @id: the cgroup id
* @ss: cgroup subsys to be looked into
*
* Returns the css if there's valid one with @id, otherwise returns NULL.
* Should be called under rcu_read_lock().
*/
struct cgroup_subsys_state * css_from_id ( int id , struct cgroup_subsys * ss )
{
2014-05-04 15:09:13 -04:00
WARN_ON_ONCE ( ! rcu_read_lock_held ( ) ) ;
2016-06-17 12:24:27 -04:00
return idr_find ( & ss - > css_idr , id ) ;
2011-02-14 11:20:01 +02:00
}
2015-11-20 15:55:52 -05:00
/**
* cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
* @path: path on the default hierarchy
*
* Find the cgroup at @path on the default hierarchy, increment its
* reference count and return it. Returns pointer to the found cgroup on
* success, ERR_PTR(-ENOENT) if @path doens't exist and ERR_PTR(-ENOTDIR)
* if @path points to a non-directory.
*/
struct cgroup * cgroup_get_from_path ( const char * path )
{
struct kernfs_node * kn ;
struct cgroup * cgrp ;
mutex_lock ( & cgroup_mutex ) ;
kn = kernfs_walk_and_get ( cgrp_dfl_root . cgrp . kn , path ) ;
if ( kn ) {
if ( kernfs_type ( kn ) = = KERNFS_DIR ) {
cgrp = kn - > priv ;
cgroup_get ( cgrp ) ;
} else {
cgrp = ERR_PTR ( - ENOTDIR ) ;
}
kernfs_put ( kn ) ;
} else {
cgrp = ERR_PTR ( - ENOENT ) ;
}
mutex_unlock ( & cgroup_mutex ) ;
return cgrp ;
}
EXPORT_SYMBOL_GPL ( cgroup_get_from_path ) ;
2016-06-30 10:28:42 -07:00
/**
* cgroup_get_from_fd - get a cgroup pointer from a fd
* @fd: fd obtained by open(cgroup2_dir)
*
* Find the cgroup from a fd which should be obtained
* by opening a cgroup directory. Returns a pointer to the
* cgroup on success. ERR_PTR is returned if the cgroup
* cannot be found.
*/
struct cgroup * cgroup_get_from_fd ( int fd )
{
struct cgroup_subsys_state * css ;
struct cgroup * cgrp ;
struct file * f ;
f = fget_raw ( fd ) ;
if ( ! f )
return ERR_PTR ( - EBADF ) ;
css = css_tryget_online_from_dir ( f - > f_path . dentry , NULL ) ;
fput ( f ) ;
if ( IS_ERR ( css ) )
return ERR_CAST ( css ) ;
cgrp = css - > cgroup ;
if ( ! cgroup_on_dfl ( cgrp ) ) {
cgroup_put ( cgrp ) ;
return ERR_PTR ( - EBADF ) ;
}
return cgrp ;
}
EXPORT_SYMBOL_GPL ( cgroup_get_from_fd ) ;
2015-12-07 17:38:53 -05:00
/*
* sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
* definition in cgroup-defs.h.
*/
# ifdef CONFIG_SOCK_CGROUP_DATA
# if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
2015-12-14 11:24:06 -05:00
DEFINE_SPINLOCK ( cgroup_sk_update_lock ) ;
2015-12-07 17:38:53 -05:00
static bool cgroup_sk_alloc_disabled __read_mostly ;
void cgroup_sk_alloc_disable ( void )
{
if ( cgroup_sk_alloc_disabled )
return ;
pr_info ( " cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation \n " ) ;
cgroup_sk_alloc_disabled = true ;
}
# else
# define cgroup_sk_alloc_disabled false
# endif
void cgroup_sk_alloc ( struct sock_cgroup_data * skcd )
{
if ( cgroup_sk_alloc_disabled )
return ;
2016-09-19 14:44:38 -07:00
/* Socket clone path */
if ( skcd - > val ) {
cgroup_get ( sock_cgroup_ptr ( skcd ) ) ;
return ;
}
2015-12-07 17:38:53 -05:00
rcu_read_lock ( ) ;
while ( true ) {
struct css_set * cset ;
cset = task_css_set ( current ) ;
if ( likely ( cgroup_tryget ( cset - > dfl_cgrp ) ) ) {
skcd - > val = ( unsigned long ) cset - > dfl_cgrp ;
break ;
}
cpu_relax ( ) ;
}
rcu_read_unlock ( ) ;
}
void cgroup_sk_free ( struct sock_cgroup_data * skcd )
{
cgroup_put ( sock_cgroup_ptr ( skcd ) ) ;
}
# endif /* CONFIG_SOCK_CGROUP_DATA */
2016-01-29 02:54:06 -06:00
/* cgroup namespaces */
2016-08-08 14:25:30 -05:00
static struct ucounts * inc_cgroup_namespaces ( struct user_namespace * ns )
{
return inc_ucount ( ns , current_euid ( ) , UCOUNT_CGROUP_NAMESPACES ) ;
}
static void dec_cgroup_namespaces ( struct ucounts * ucounts )
{
dec_ucount ( ucounts , UCOUNT_CGROUP_NAMESPACES ) ;
}
2016-01-29 02:54:06 -06:00
static struct cgroup_namespace * alloc_cgroup_ns ( void )
{
struct cgroup_namespace * new_ns ;
int ret ;
new_ns = kzalloc ( sizeof ( struct cgroup_namespace ) , GFP_KERNEL ) ;
if ( ! new_ns )
return ERR_PTR ( - ENOMEM ) ;
ret = ns_alloc_inum ( & new_ns - > ns ) ;
if ( ret ) {
kfree ( new_ns ) ;
return ERR_PTR ( ret ) ;
}
atomic_set ( & new_ns - > count , 1 ) ;
new_ns - > ns . ops = & cgroupns_operations ;
return new_ns ;
}
void free_cgroup_ns ( struct cgroup_namespace * ns )
{
put_css_set ( ns - > root_cset ) ;
2016-08-08 14:25:30 -05:00
dec_cgroup_namespaces ( ns - > ucounts ) ;
2016-01-29 02:54:06 -06:00
put_user_ns ( ns - > user_ns ) ;
ns_free_inum ( & ns - > ns ) ;
kfree ( ns ) ;
}
EXPORT_SYMBOL ( free_cgroup_ns ) ;
struct cgroup_namespace * copy_cgroup_ns ( unsigned long flags ,
struct user_namespace * user_ns ,
struct cgroup_namespace * old_ns )
{
2016-02-28 08:59:33 -05:00
struct cgroup_namespace * new_ns ;
2016-08-08 14:25:30 -05:00
struct ucounts * ucounts ;
2016-02-28 08:59:33 -05:00
struct css_set * cset ;
2016-01-29 02:54:06 -06:00
BUG_ON ( ! old_ns ) ;
if ( ! ( flags & CLONE_NEWCGROUP ) ) {
get_cgroup_ns ( old_ns ) ;
return old_ns ;
}
/* Allow only sysadmin to create cgroup namespace. */
if ( ! ns_capable ( user_ns , CAP_SYS_ADMIN ) )
2016-02-28 08:59:33 -05:00
return ERR_PTR ( - EPERM ) ;
2016-01-29 02:54:06 -06:00
2016-08-08 14:25:30 -05:00
ucounts = inc_cgroup_namespaces ( user_ns ) ;
if ( ! ucounts )
2016-09-22 13:08:36 -05:00
return ERR_PTR ( - ENOSPC ) ;
2016-08-08 14:25:30 -05:00
2016-07-15 06:35:24 -05:00
/* It is not safe to take cgroup_mutex here */
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2016-01-29 02:54:06 -06:00
cset = task_css_set ( current ) ;
get_css_set ( cset ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2016-01-29 02:54:06 -06:00
new_ns = alloc_cgroup_ns ( ) ;
2016-02-18 11:44:24 -05:00
if ( IS_ERR ( new_ns ) ) {
2016-02-28 08:59:33 -05:00
put_css_set ( cset ) ;
2016-08-08 14:25:30 -05:00
dec_cgroup_namespaces ( ucounts ) ;
2016-02-28 08:59:33 -05:00
return new_ns ;
2016-02-18 11:44:24 -05:00
}
2016-01-29 02:54:06 -06:00
new_ns - > user_ns = get_user_ns ( user_ns ) ;
2016-08-08 14:25:30 -05:00
new_ns - > ucounts = ucounts ;
2016-01-29 02:54:06 -06:00
new_ns - > root_cset = cset ;
return new_ns ;
}
static inline struct cgroup_namespace * to_cg_ns ( struct ns_common * ns )
{
return container_of ( ns , struct cgroup_namespace , ns ) ;
}
2016-01-29 02:54:07 -06:00
static int cgroupns_install ( struct nsproxy * nsproxy , struct ns_common * ns )
2016-01-29 02:54:06 -06:00
{
2016-01-29 02:54:07 -06:00
struct cgroup_namespace * cgroup_ns = to_cg_ns ( ns ) ;
if ( ! ns_capable ( current_user_ns ( ) , CAP_SYS_ADMIN ) | |
! ns_capable ( cgroup_ns - > user_ns , CAP_SYS_ADMIN ) )
return - EPERM ;
/* Don't need to do anything if we are attaching to our own cgroupns. */
if ( cgroup_ns = = nsproxy - > cgroup_ns )
return 0 ;
get_cgroup_ns ( cgroup_ns ) ;
put_cgroup_ns ( nsproxy - > cgroup_ns ) ;
nsproxy - > cgroup_ns = cgroup_ns ;
return 0 ;
2016-01-29 02:54:06 -06:00
}
static struct ns_common * cgroupns_get ( struct task_struct * task )
{
struct cgroup_namespace * ns = NULL ;
struct nsproxy * nsproxy ;
task_lock ( task ) ;
nsproxy = task - > nsproxy ;
if ( nsproxy ) {
ns = nsproxy - > cgroup_ns ;
get_cgroup_ns ( ns ) ;
}
task_unlock ( task ) ;
return ns ? & ns - > ns : NULL ;
}
static void cgroupns_put ( struct ns_common * ns )
{
put_cgroup_ns ( to_cg_ns ( ns ) ) ;
}
2016-09-06 00:47:13 -07:00
static struct user_namespace * cgroupns_owner ( struct ns_common * ns )
{
return to_cg_ns ( ns ) - > user_ns ;
}
2016-01-29 02:54:06 -06:00
const struct proc_ns_operations cgroupns_operations = {
. name = " cgroup " ,
. type = CLONE_NEWCGROUP ,
. get = cgroupns_get ,
. put = cgroupns_put ,
. install = cgroupns_install ,
2016-09-06 00:47:13 -07:00
. owner = cgroupns_owner ,
2016-01-29 02:54:06 -06:00
} ;
static __init int cgroup_namespaces_init ( void )
{
return 0 ;
}
subsys_initcall ( cgroup_namespaces_init ) ;
2016-11-23 16:52:26 +01:00
# ifdef CONFIG_CGROUP_BPF
void cgroup_bpf_update ( struct cgroup * cgrp ,
struct bpf_prog * prog ,
enum bpf_attach_type type )
{
struct cgroup * parent = cgroup_parent ( cgrp ) ;
mutex_lock ( & cgroup_mutex ) ;
__cgroup_bpf_update ( cgrp , parent , prog , type ) ;
mutex_unlock ( & cgroup_mutex ) ;
}
# endif /* CONFIG_CGROUP_BPF */
2009-09-23 15:56:20 -07:00
# ifdef CONFIG_CGROUP_DEBUG
2013-08-08 20:11:23 -04:00
static struct cgroup_subsys_state *
debug_css_alloc ( struct cgroup_subsys_state * parent_css )
2009-09-23 15:56:20 -07:00
{
struct cgroup_subsys_state * css = kzalloc ( sizeof ( * css ) , GFP_KERNEL ) ;
if ( ! css )
return ERR_PTR ( - ENOMEM ) ;
return css ;
}
2013-08-08 20:11:23 -04:00
static void debug_css_free ( struct cgroup_subsys_state * css )
2009-09-23 15:56:20 -07:00
{
2013-08-08 20:11:23 -04:00
kfree ( css ) ;
2009-09-23 15:56:20 -07:00
}
2013-08-08 20:11:24 -04:00
static u64 debug_taskcount_read ( struct cgroup_subsys_state * css ,
struct cftype * cft )
2009-09-23 15:56:20 -07:00
{
2013-08-08 20:11:24 -04:00
return cgroup_task_count ( css - > cgroup ) ;
2009-09-23 15:56:20 -07:00
}
2013-08-08 20:11:24 -04:00
static u64 current_css_set_read ( struct cgroup_subsys_state * css ,
struct cftype * cft )
2009-09-23 15:56:20 -07:00
{
return ( u64 ) ( unsigned long ) current - > cgroups ;
}
2013-08-08 20:11:24 -04:00
static u64 current_css_set_refcount_read ( struct cgroup_subsys_state * css ,
2013-06-14 11:17:19 +08:00
struct cftype * cft )
2009-09-23 15:56:20 -07:00
{
u64 count ;
rcu_read_lock ( ) ;
2013-06-21 15:52:04 -07:00
count = atomic_read ( & task_css_set ( current ) - > refcount ) ;
2009-09-23 15:56:20 -07:00
rcu_read_unlock ( ) ;
return count ;
}
2013-12-05 12:28:04 -05:00
static int current_css_set_cg_links_read ( struct seq_file * seq , void * v )
2009-09-23 15:56:22 -07:00
{
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link ;
2013-06-12 21:04:49 -07:00
struct css_set * cset ;
2014-02-12 09:29:50 -05:00
char * name_buf ;
2009-09-23 15:56:22 -07:00
2014-02-12 09:29:50 -05:00
name_buf = kmalloc ( NAME_MAX + 1 , GFP_KERNEL ) ;
if ( ! name_buf )
return - ENOMEM ;
2009-09-23 15:56:22 -07:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2009-09-23 15:56:22 -07:00
rcu_read_lock ( ) ;
2013-06-12 21:04:49 -07:00
cset = rcu_dereference ( current - > cgroups ) ;
2013-06-12 21:04:50 -07:00
list_for_each_entry ( link , & cset - > cgrp_links , cgrp_link ) {
2009-09-23 15:56:22 -07:00
struct cgroup * c = link - > cgrp ;
2014-03-19 10:23:55 -04:00
cgroup_name ( c , name_buf , NAME_MAX + 1 ) ;
2009-09-23 15:56:23 -07:00
seq_printf ( seq , " Root %d group %s \n " ,
2014-03-19 10:23:55 -04:00
c - > root - > hierarchy_id , name_buf ) ;
2009-09-23 15:56:22 -07:00
}
rcu_read_unlock ( ) ;
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2014-02-12 09:29:50 -05:00
kfree ( name_buf ) ;
2009-09-23 15:56:22 -07:00
return 0 ;
}
# define MAX_TASKS_SHOWN_PER_CSS 25
2013-12-05 12:28:04 -05:00
static int cgroup_css_links_read ( struct seq_file * seq , void * v )
2009-09-23 15:56:22 -07:00
{
2013-12-05 12:28:04 -05:00
struct cgroup_subsys_state * css = seq_css ( seq ) ;
2013-06-12 21:04:50 -07:00
struct cgrp_cset_link * link ;
2009-09-23 15:56:22 -07:00
2016-06-22 17:28:41 -03:00
spin_lock_irq ( & css_set_lock ) ;
2013-08-08 20:11:24 -04:00
list_for_each_entry ( link , & css - > cgroup - > cset_links , cset_link ) {
2013-06-12 21:04:50 -07:00
struct css_set * cset = link - > cset ;
2009-09-23 15:56:22 -07:00
struct task_struct * task ;
int count = 0 ;
2014-02-25 10:04:01 -05:00
2013-06-12 21:04:49 -07:00
seq_printf ( seq , " css_set %p \n " , cset ) ;
2014-02-25 10:04:01 -05:00
2013-06-12 21:04:49 -07:00
list_for_each_entry ( task , & cset - > tasks , cg_list ) {
2014-02-25 10:04:01 -05:00
if ( count + + > MAX_TASKS_SHOWN_PER_CSS )
goto overflow ;
seq_printf ( seq , " task %d \n " , task_pid_vnr ( task ) ) ;
2009-09-23 15:56:22 -07:00
}
2014-02-25 10:04:01 -05:00
list_for_each_entry ( task , & cset - > mg_tasks , cg_list ) {
if ( count + + > MAX_TASKS_SHOWN_PER_CSS )
goto overflow ;
seq_printf ( seq , " task %d \n " , task_pid_vnr ( task ) ) ;
}
continue ;
overflow :
seq_puts ( seq , " ... \n " ) ;
2009-09-23 15:56:22 -07:00
}
2016-06-22 17:28:41 -03:00
spin_unlock_irq ( & css_set_lock ) ;
2009-09-23 15:56:22 -07:00
return 0 ;
}
2013-08-08 20:11:24 -04:00
static u64 releasable_read ( struct cgroup_subsys_state * css , struct cftype * cft )
2009-09-23 15:56:20 -07:00
{
2015-10-15 16:41:50 -04:00
return ( ! cgroup_is_populated ( css - > cgroup ) & &
2014-09-19 16:51:00 +08:00
! css_has_online_children ( & css - > cgroup - > self ) ) ;
2009-09-23 15:56:20 -07:00
}
static struct cftype debug_files [ ] = {
{
. name = " taskcount " ,
. read_u64 = debug_taskcount_read ,
} ,
{
. name = " current_css_set " ,
. read_u64 = current_css_set_read ,
} ,
{
. name = " current_css_set_refcount " ,
. read_u64 = current_css_set_refcount_read ,
} ,
2009-09-23 15:56:22 -07:00
{
. name = " current_css_set_cg_links " ,
2013-12-05 12:28:04 -05:00
. seq_show = current_css_set_cg_links_read ,
2009-09-23 15:56:22 -07:00
} ,
{
. name = " cgroup_css_links " ,
2013-12-05 12:28:04 -05:00
. seq_show = cgroup_css_links_read ,
2009-09-23 15:56:22 -07:00
} ,
2009-09-23 15:56:20 -07:00
{
. name = " releasable " ,
. read_u64 = releasable_read ,
} ,
2012-04-01 12:09:55 -07:00
{ } /* terminate */
} ;
2009-09-23 15:56:20 -07:00
2014-02-08 10:36:58 -05:00
struct cgroup_subsys debug_cgrp_subsys = {
2012-11-19 08:13:38 -08:00
. css_alloc = debug_css_alloc ,
. css_free = debug_css_free ,
2014-07-15 11:05:09 -04:00
. legacy_cftypes = debug_files ,
2009-09-23 15:56:20 -07:00
} ;
# endif /* CONFIG_CGROUP_DEBUG */