core: split out cgroup specific state fields from Unit → CGroupRuntime

This refactors the Unit structure a bit: all cgroup-related state fields
are moved to a new structure CGroupRuntime, which is only allocated as
we realize a cgroup.

This is both a nice cleanup and should make unit structures that have no
cgroup associated considerably smaller, because the cgroup was never
realized or because they belong to a unit type that doesn't have cgroups
anyway.

This makes things nicely symmetric:

        ExecContext → static user configuration about execution
        ExecRuntime → dynamic user state of execution
        CGroupContext → static user configuration about cgroups
        CGroupRuntime → dynamic user state of cgroups

And each time the XyzContext is part of the unit type structures such as
Service or Slice that need it, but the runtime object is only allocated
when a unit is started.
This commit is contained in:
Lennart Poettering
2024-01-27 10:59:15 +01:00
parent 3f236f24ba
commit 9cc545447e
28 changed files with 1349 additions and 751 deletions

View File

@@ -196,19 +196,26 @@ static int bpf_firewall_compile_bpf(
_cleanup_(bpf_program_freep) BPFProgram *p = NULL;
int accounting_map_fd, r;
bool access_enabled;
CGroupRuntime *crt;
assert(u);
assert(ret);
crt = unit_get_cgroup_runtime(u);
if (!crt) {
*ret = NULL;
return 0;
}
accounting_map_fd = is_ingress ?
u->ip_accounting_ingress_map_fd :
u->ip_accounting_egress_map_fd;
crt->ip_accounting_ingress_map_fd :
crt->ip_accounting_egress_map_fd;
access_enabled =
u->ipv4_allow_map_fd >= 0 ||
u->ipv6_allow_map_fd >= 0 ||
u->ipv4_deny_map_fd >= 0 ||
u->ipv6_deny_map_fd >= 0 ||
crt->ipv4_allow_map_fd >= 0 ||
crt->ipv6_allow_map_fd >= 0 ||
crt->ipv4_deny_map_fd >= 0 ||
crt->ipv6_deny_map_fd >= 0 ||
ip_allow_any ||
ip_deny_any;
@@ -234,26 +241,26 @@ static int bpf_firewall_compile_bpf(
* - Otherwise, access will be granted
*/
if (u->ipv4_deny_map_fd >= 0) {
r = add_lookup_instructions(p, u->ipv4_deny_map_fd, ETH_P_IP, is_ingress, ACCESS_DENIED);
if (crt->ipv4_deny_map_fd >= 0) {
r = add_lookup_instructions(p, crt->ipv4_deny_map_fd, ETH_P_IP, is_ingress, ACCESS_DENIED);
if (r < 0)
return r;
}
if (u->ipv6_deny_map_fd >= 0) {
r = add_lookup_instructions(p, u->ipv6_deny_map_fd, ETH_P_IPV6, is_ingress, ACCESS_DENIED);
if (crt->ipv6_deny_map_fd >= 0) {
r = add_lookup_instructions(p, crt->ipv6_deny_map_fd, ETH_P_IPV6, is_ingress, ACCESS_DENIED);
if (r < 0)
return r;
}
if (u->ipv4_allow_map_fd >= 0) {
r = add_lookup_instructions(p, u->ipv4_allow_map_fd, ETH_P_IP, is_ingress, ACCESS_ALLOWED);
if (crt->ipv4_allow_map_fd >= 0) {
r = add_lookup_instructions(p, crt->ipv4_allow_map_fd, ETH_P_IP, is_ingress, ACCESS_ALLOWED);
if (r < 0)
return r;
}
if (u->ipv6_allow_map_fd >= 0) {
r = add_lookup_instructions(p, u->ipv6_allow_map_fd, ETH_P_IPV6, is_ingress, ACCESS_ALLOWED);
if (crt->ipv6_allow_map_fd >= 0) {
r = add_lookup_instructions(p, crt->ipv6_allow_map_fd, ETH_P_IPV6, is_ingress, ACCESS_ALLOWED);
if (r < 0)
return r;
}
@@ -495,37 +502,36 @@ static int bpf_firewall_prepare_access_maps(
return 0;
}
static int bpf_firewall_prepare_accounting_maps(Unit *u, bool enabled, int *fd_ingress, int *fd_egress) {
static int bpf_firewall_prepare_accounting_maps(Unit *u, bool enabled, CGroupRuntime *crt) {
int r;
assert(u);
assert(fd_ingress);
assert(fd_egress);
assert(crt);
if (enabled) {
if (*fd_ingress < 0) {
if (crt->ip_accounting_ingress_map_fd < 0) {
char *name = strjoina("I_", u->id);
r = bpf_map_new(name, BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(uint64_t), 2, 0);
if (r < 0)
return r;
*fd_ingress = r;
crt->ip_accounting_ingress_map_fd = r;
}
if (*fd_egress < 0) {
if (crt->ip_accounting_egress_map_fd < 0) {
char *name = strjoina("E_", u->id);
r = bpf_map_new(name, BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(uint64_t), 2, 0);
if (r < 0)
return r;
*fd_egress = r;
crt->ip_accounting_egress_map_fd = r;
}
} else {
*fd_ingress = safe_close(*fd_ingress);
*fd_egress = safe_close(*fd_egress);
crt->ip_accounting_ingress_map_fd = safe_close(crt->ip_accounting_ingress_map_fd);
crt->ip_accounting_egress_map_fd = safe_close(crt->ip_accounting_egress_map_fd);
zero(u->ip_accounting_extra);
zero(crt->ip_accounting_extra);
}
return 0;
@@ -535,6 +541,7 @@ int bpf_firewall_compile(Unit *u) {
const char *ingress_name = NULL, *egress_name = NULL;
bool ip_allow_any = false, ip_deny_any = false;
CGroupContext *cc;
CGroupRuntime *crt;
int r, supported;
assert(u);
@@ -543,6 +550,10 @@ int bpf_firewall_compile(Unit *u) {
if (!cc)
return -EINVAL;
crt = unit_setup_cgroup_runtime(u);
if (!crt)
return -ENOMEM;
supported = bpf_firewall_supported();
if (supported < 0)
return supported;
@@ -569,14 +580,14 @@ int bpf_firewall_compile(Unit *u) {
* but we reuse the accounting maps. That way the firewall in effect always maps to the actual
* configuration, but we don't flush out the accounting unnecessarily */
u->ip_bpf_ingress = bpf_program_free(u->ip_bpf_ingress);
u->ip_bpf_egress = bpf_program_free(u->ip_bpf_egress);
crt->ip_bpf_ingress = bpf_program_free(crt->ip_bpf_ingress);
crt->ip_bpf_egress = bpf_program_free(crt->ip_bpf_egress);
u->ipv4_allow_map_fd = safe_close(u->ipv4_allow_map_fd);
u->ipv4_deny_map_fd = safe_close(u->ipv4_deny_map_fd);
crt->ipv4_allow_map_fd = safe_close(crt->ipv4_allow_map_fd);
crt->ipv4_deny_map_fd = safe_close(crt->ipv4_deny_map_fd);
u->ipv6_allow_map_fd = safe_close(u->ipv6_allow_map_fd);
u->ipv6_deny_map_fd = safe_close(u->ipv6_deny_map_fd);
crt->ipv6_allow_map_fd = safe_close(crt->ipv6_allow_map_fd);
crt->ipv6_deny_map_fd = safe_close(crt->ipv6_deny_map_fd);
if (u->type != UNIT_SLICE) {
/* In inner nodes we only do accounting, we do not actually bother with access control. However, leaf
@@ -585,24 +596,24 @@ int bpf_firewall_compile(Unit *u) {
* means that all configured IP access rules *will* take effect on processes, even though we never
* compile them for inner nodes. */
r = bpf_firewall_prepare_access_maps(u, ACCESS_ALLOWED, &u->ipv4_allow_map_fd, &u->ipv6_allow_map_fd, &ip_allow_any);
r = bpf_firewall_prepare_access_maps(u, ACCESS_ALLOWED, &crt->ipv4_allow_map_fd, &crt->ipv6_allow_map_fd, &ip_allow_any);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-firewall: Preparation of BPF allow maps failed: %m");
r = bpf_firewall_prepare_access_maps(u, ACCESS_DENIED, &u->ipv4_deny_map_fd, &u->ipv6_deny_map_fd, &ip_deny_any);
r = bpf_firewall_prepare_access_maps(u, ACCESS_DENIED, &crt->ipv4_deny_map_fd, &crt->ipv6_deny_map_fd, &ip_deny_any);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-firewall: Preparation of BPF deny maps failed: %m");
}
r = bpf_firewall_prepare_accounting_maps(u, cc->ip_accounting, &u->ip_accounting_ingress_map_fd, &u->ip_accounting_egress_map_fd);
r = bpf_firewall_prepare_accounting_maps(u, cc->ip_accounting, crt);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-firewall: Preparation of BPF accounting maps failed: %m");
r = bpf_firewall_compile_bpf(u, ingress_name, true, &u->ip_bpf_ingress, ip_allow_any, ip_deny_any);
r = bpf_firewall_compile_bpf(u, ingress_name, true, &crt->ip_bpf_ingress, ip_allow_any, ip_deny_any);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-firewall: Compilation of ingress BPF program failed: %m");
r = bpf_firewall_compile_bpf(u, egress_name, false, &u->ip_bpf_egress, ip_allow_any, ip_deny_any);
r = bpf_firewall_compile_bpf(u, egress_name, false, &crt->ip_bpf_egress, ip_allow_any, ip_deny_any);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-firewall: Compilation of egress BPF program failed: %m");
@@ -634,6 +645,7 @@ static int load_bpf_progs_from_fs_to_set(Unit *u, char **filter_paths, Set **set
int bpf_firewall_load_custom(Unit *u) {
CGroupContext *cc;
CGroupRuntime *crt;
int r, supported;
assert(u);
@@ -641,6 +653,9 @@ int bpf_firewall_load_custom(Unit *u) {
cc = unit_get_cgroup_context(u);
if (!cc)
return 0;
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
if (!(cc->ip_filters_ingress || cc->ip_filters_egress))
return 0;
@@ -653,10 +668,10 @@ int bpf_firewall_load_custom(Unit *u) {
return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EOPNOTSUPP),
"bpf-firewall: BPF_F_ALLOW_MULTI not supported, cannot attach custom BPF programs.");
r = load_bpf_progs_from_fs_to_set(u, cc->ip_filters_ingress, &u->ip_bpf_custom_ingress);
r = load_bpf_progs_from_fs_to_set(u, cc->ip_filters_ingress, &crt->ip_bpf_custom_ingress);
if (r < 0)
return r;
r = load_bpf_progs_from_fs_to_set(u, cc->ip_filters_egress, &u->ip_bpf_custom_egress);
r = load_bpf_progs_from_fs_to_set(u, cc->ip_filters_egress, &crt->ip_bpf_custom_egress);
if (r < 0)
return r;
@@ -686,6 +701,7 @@ int bpf_firewall_install(Unit *u) {
_cleanup_(bpf_program_freep) BPFProgram *ip_bpf_ingress_uninstall = NULL, *ip_bpf_egress_uninstall = NULL;
_cleanup_free_ char *path = NULL;
CGroupContext *cc;
CGroupRuntime *crt;
int r, supported;
uint32_t flags;
@@ -694,9 +710,12 @@ int bpf_firewall_install(Unit *u) {
cc = unit_get_cgroup_context(u);
if (!cc)
return -EINVAL;
if (!u->cgroup_path)
crt = unit_get_cgroup_runtime(u);
if (!crt)
return -EINVAL;
if (!u->cgroup_realized)
if (!crt->cgroup_path)
return -EINVAL;
if (!crt->cgroup_realized)
return -EINVAL;
supported = bpf_firewall_supported();
@@ -709,11 +728,11 @@ int bpf_firewall_install(Unit *u) {
return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EOPNOTSUPP),
"bpf-firewall: BPF_F_ALLOW_MULTI not supported, not doing BPF firewall on slice units.");
if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI &&
(!set_isempty(u->ip_bpf_custom_ingress) || !set_isempty(u->ip_bpf_custom_egress)))
(!set_isempty(crt->ip_bpf_custom_ingress) || !set_isempty(crt->ip_bpf_custom_egress)))
return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EOPNOTSUPP),
"bpf-firewall: BPF_F_ALLOW_MULTI not supported, cannot attach custom BPF programs.");
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &path);
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, NULL, &path);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-firewall: Failed to determine cgroup path: %m");
@@ -724,44 +743,44 @@ int bpf_firewall_install(Unit *u) {
* after attaching the new programs, so that there's no time window where neither program is
* attached. (There will be a period where both are attached, but that's OK, since this is a
* security feature where we rather want to lock down too much than too little.) */
ip_bpf_egress_uninstall = TAKE_PTR(u->ip_bpf_egress_installed);
ip_bpf_ingress_uninstall = TAKE_PTR(u->ip_bpf_ingress_installed);
ip_bpf_egress_uninstall = TAKE_PTR(crt->ip_bpf_egress_installed);
ip_bpf_ingress_uninstall = TAKE_PTR(crt->ip_bpf_ingress_installed);
} else {
/* If we don't have BPF_F_ALLOW_MULTI then unref the old BPF programs (which will implicitly
* detach them) right before attaching the new program, to minimize the time window when we
* don't account for IP traffic. */
u->ip_bpf_egress_installed = bpf_program_free(u->ip_bpf_egress_installed);
u->ip_bpf_ingress_installed = bpf_program_free(u->ip_bpf_ingress_installed);
crt->ip_bpf_egress_installed = bpf_program_free(crt->ip_bpf_egress_installed);
crt->ip_bpf_ingress_installed = bpf_program_free(crt->ip_bpf_ingress_installed);
}
if (u->ip_bpf_egress) {
r = bpf_program_cgroup_attach(u->ip_bpf_egress, BPF_CGROUP_INET_EGRESS, path, flags);
if (crt->ip_bpf_egress) {
r = bpf_program_cgroup_attach(crt->ip_bpf_egress, BPF_CGROUP_INET_EGRESS, path, flags);
if (r < 0)
return log_unit_error_errno(u, r,
"bpf-firewall: Attaching egress BPF program to cgroup %s failed: %m", path);
/* Remember that this BPF program is installed now. */
u->ip_bpf_egress_installed = TAKE_PTR(u->ip_bpf_egress);
crt->ip_bpf_egress_installed = TAKE_PTR(crt->ip_bpf_egress);
}
if (u->ip_bpf_ingress) {
r = bpf_program_cgroup_attach(u->ip_bpf_ingress, BPF_CGROUP_INET_INGRESS, path, flags);
if (crt->ip_bpf_ingress) {
r = bpf_program_cgroup_attach(crt->ip_bpf_ingress, BPF_CGROUP_INET_INGRESS, path, flags);
if (r < 0)
return log_unit_error_errno(u, r,
"bpf-firewall: Attaching ingress BPF program to cgroup %s failed: %m", path);
u->ip_bpf_ingress_installed = TAKE_PTR(u->ip_bpf_ingress);
crt->ip_bpf_ingress_installed = TAKE_PTR(crt->ip_bpf_ingress);
}
/* And now, definitely get rid of the old programs, and detach them */
ip_bpf_egress_uninstall = bpf_program_free(ip_bpf_egress_uninstall);
ip_bpf_ingress_uninstall = bpf_program_free(ip_bpf_ingress_uninstall);
r = attach_custom_bpf_progs(u, path, BPF_CGROUP_INET_EGRESS, &u->ip_bpf_custom_egress, &u->ip_bpf_custom_egress_installed);
r = attach_custom_bpf_progs(u, path, BPF_CGROUP_INET_EGRESS, &crt->ip_bpf_custom_egress, &crt->ip_bpf_custom_egress_installed);
if (r < 0)
return r;
r = attach_custom_bpf_progs(u, path, BPF_CGROUP_INET_INGRESS, &u->ip_bpf_custom_ingress, &u->ip_bpf_custom_ingress_installed);
r = attach_custom_bpf_progs(u, path, BPF_CGROUP_INET_INGRESS, &crt->ip_bpf_custom_ingress, &crt->ip_bpf_custom_ingress_installed);
if (r < 0)
return r;
@@ -954,21 +973,25 @@ void emit_bpf_firewall_warning(Unit *u) {
void bpf_firewall_close(Unit *u) {
assert(u);
u->ip_accounting_ingress_map_fd = safe_close(u->ip_accounting_ingress_map_fd);
u->ip_accounting_egress_map_fd = safe_close(u->ip_accounting_egress_map_fd);
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (!crt)
return;
u->ipv4_allow_map_fd = safe_close(u->ipv4_allow_map_fd);
u->ipv6_allow_map_fd = safe_close(u->ipv6_allow_map_fd);
u->ipv4_deny_map_fd = safe_close(u->ipv4_deny_map_fd);
u->ipv6_deny_map_fd = safe_close(u->ipv6_deny_map_fd);
crt->ip_accounting_ingress_map_fd = safe_close(crt->ip_accounting_ingress_map_fd);
crt->ip_accounting_egress_map_fd = safe_close(crt->ip_accounting_egress_map_fd);
u->ip_bpf_ingress = bpf_program_free(u->ip_bpf_ingress);
u->ip_bpf_ingress_installed = bpf_program_free(u->ip_bpf_ingress_installed);
u->ip_bpf_egress = bpf_program_free(u->ip_bpf_egress);
u->ip_bpf_egress_installed = bpf_program_free(u->ip_bpf_egress_installed);
crt->ipv4_allow_map_fd = safe_close(crt->ipv4_allow_map_fd);
crt->ipv6_allow_map_fd = safe_close(crt->ipv6_allow_map_fd);
crt->ipv4_deny_map_fd = safe_close(crt->ipv4_deny_map_fd);
crt->ipv6_deny_map_fd = safe_close(crt->ipv6_deny_map_fd);
u->ip_bpf_custom_ingress = set_free(u->ip_bpf_custom_ingress);
u->ip_bpf_custom_egress = set_free(u->ip_bpf_custom_egress);
u->ip_bpf_custom_ingress_installed = set_free(u->ip_bpf_custom_ingress_installed);
u->ip_bpf_custom_egress_installed = set_free(u->ip_bpf_custom_egress_installed);
crt->ip_bpf_ingress = bpf_program_free(crt->ip_bpf_ingress);
crt->ip_bpf_ingress_installed = bpf_program_free(crt->ip_bpf_ingress_installed);
crt->ip_bpf_egress = bpf_program_free(crt->ip_bpf_egress);
crt->ip_bpf_egress_installed = bpf_program_free(crt->ip_bpf_egress_installed);
crt->ip_bpf_custom_ingress = set_free(crt->ip_bpf_custom_ingress);
crt->ip_bpf_custom_egress = set_free(crt->ip_bpf_custom_egress);
crt->ip_bpf_custom_ingress_installed = set_free(crt->ip_bpf_custom_ingress_installed);
crt->ip_bpf_custom_egress_installed = set_free(crt->ip_bpf_custom_egress_installed);
}

View File

@@ -81,6 +81,7 @@ static int bpf_foreign_prepare(
Unit *u,
enum bpf_attach_type attach_type,
const char *bpffs_path) {
_cleanup_(bpf_program_freep) BPFProgram *prog = NULL;
_cleanup_free_ BPFForeignKey *key = NULL;
uint32_t prog_id;
@@ -101,6 +102,11 @@ static int bpf_foreign_prepare(
return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
"bpf-foreign: Path in BPF filesystem is expected.");
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (!crt)
return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
"Failed to get control group runtime object.");
r = bpf_program_new_from_bpffs_path(bpffs_path, &prog);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-foreign: Failed to create foreign BPF program: %m");
@@ -114,7 +120,7 @@ static int bpf_foreign_prepare(
return log_unit_error_errno(u, r,
"bpf-foreign: Failed to create foreign BPF program key from path '%s': %m", bpffs_path);
r = hashmap_ensure_put(&u->bpf_foreign_by_key, &bpf_foreign_by_key_hash_ops, key, prog);
r = hashmap_ensure_put(&crt->bpf_foreign_by_key, &bpf_foreign_by_key_hash_ops, key, prog);
if (r == -EEXIST) {
log_unit_warning_errno(u, r, "bpf-foreign: Foreign BPF program already exists, ignoring: %m");
return 0;
@@ -131,6 +137,7 @@ static int bpf_foreign_prepare(
int bpf_foreign_install(Unit *u) {
_cleanup_free_ char *cgroup_path = NULL;
CGroupContext *cc;
CGroupRuntime *crt;
int r, ret = 0;
assert(u);
@@ -139,7 +146,11 @@ int bpf_foreign_install(Unit *u) {
if (!cc)
return 0;
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &cgroup_path);
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, NULL, &cgroup_path);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-foreign: Failed to get cgroup path: %m");
@@ -149,6 +160,6 @@ int bpf_foreign_install(Unit *u) {
ret = r;
}
r = attach_programs(u, cgroup_path, u->bpf_foreign_by_key, BPF_F_ALLOW_MULTI);
r = attach_programs(u, cgroup_path, crt->bpf_foreign_by_key, BPF_F_ALLOW_MULTI);
return ret < 0 ? ret : r;
}

View File

@@ -209,7 +209,9 @@ int bpf_restrict_fs_update(const Set *filesystems, uint64_t cgroup_id, int outer
return 0;
}
int bpf_restrict_fs_cleanup(const Unit *u) {
int bpf_restrict_fs_cleanup(Unit *u) {
CGroupRuntime *crt;
assert(u);
assert(u->manager);
@@ -220,14 +222,18 @@ int bpf_restrict_fs_cleanup(const Unit *u) {
if (!u->manager->restrict_fs)
return 0;
if (u->cgroup_id == 0)
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
if (crt->cgroup_id == 0)
return 0;
int fd = sym_bpf_map__fd(u->manager->restrict_fs->maps.cgroup_hash);
if (fd < 0)
return log_unit_error_errno(u, errno, "bpf-restrict-fs: Failed to get BPF map fd: %m");
if (sym_bpf_map_delete_elem(fd, &u->cgroup_id) != 0 && errno != ENOENT)
if (sym_bpf_map_delete_elem(fd, &crt->cgroup_id) != 0 && errno != ENOENT)
return log_unit_debug_errno(u, errno, "bpf-restrict-fs: Failed to delete cgroup entry from LSM BPF map: %m");
return 0;
@@ -259,7 +265,7 @@ int bpf_restrict_fs_update(const Set *filesystems, uint64_t cgroup_id, int outer
return log_debug_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), "bpf-restrict-fs: Failed to restrict filesystems using LSM BPF: %m");
}
int bpf_restrict_fs_cleanup(const Unit *u) {
int bpf_restrict_fs_cleanup(Unit *u) {
return 0;
}

View File

@@ -17,7 +17,7 @@ typedef struct restrict_fs_bpf restrict_fs_bpf;
bool bpf_restrict_fs_supported(bool initialize);
int bpf_restrict_fs_setup(Manager *m);
int bpf_restrict_fs_update(const Set *filesystems, uint64_t cgroup_id, int outer_map_fd, bool allow_list);
int bpf_restrict_fs_cleanup(const Unit *u);
int bpf_restrict_fs_cleanup(Unit *u);
int bpf_restrict_fs_map_fd(Unit *u);
void bpf_restrict_fs_destroy(struct restrict_fs_bpf *prog);
int bpf_restrict_fs_parse_filesystem(const char *name, Set **filesystems, FilesystemParseFlags flags, const char *unit, const char *filename, unsigned line);

View File

@@ -103,13 +103,18 @@ static int restrict_ifaces_install_impl(Unit *u) {
_cleanup_free_ char *cgroup_path = NULL;
_cleanup_close_ int cgroup_fd = -EBADF;
CGroupContext *cc;
CGroupRuntime *crt;
int r;
cc = unit_get_cgroup_context(u);
if (!cc)
return 0;
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &cgroup_path);
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, NULL, &cgroup_path);
if (r < 0)
return log_unit_error_errno(u, r, "restrict-interfaces: Failed to get cgroup path: %m");
@@ -137,30 +142,42 @@ static int restrict_ifaces_install_impl(Unit *u) {
if (r != 0)
return log_unit_error_errno(u, r, "restrict-interfaces: Failed to create egress cgroup link: %m");
u->restrict_ifaces_ingress_bpf_link = TAKE_PTR(ingress_link);
u->restrict_ifaces_egress_bpf_link = TAKE_PTR(egress_link);
crt->restrict_ifaces_ingress_bpf_link = TAKE_PTR(ingress_link);
crt->restrict_ifaces_egress_bpf_link = TAKE_PTR(egress_link);
return 0;
}
int bpf_restrict_ifaces_install(Unit *u) {
int r;
r = restrict_ifaces_install_impl(u);
fdset_close(u->initial_restric_ifaces_link_fds);
return r;
}
int bpf_restrict_ifaces_serialize(Unit *u, FILE *f, FDSet *fds) {
CGroupRuntime *crt;
int r;
assert(u);
r = bpf_serialize_link(f, fds, "restrict-ifaces-bpf-fd", u->restrict_ifaces_ingress_bpf_link);
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
r = restrict_ifaces_install_impl(u);
fdset_close(crt->initial_restric_ifaces_link_fds);
return r;
}
int bpf_restrict_ifaces_serialize(Unit *u, FILE *f, FDSet *fds) {
CGroupRuntime *crt;
int r;
assert(u);
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
r = bpf_serialize_link(f, fds, "restrict-ifaces-bpf-fd", crt->restrict_ifaces_ingress_bpf_link);
if (r < 0)
return r;
return bpf_serialize_link(f, fds, "restrict-ifaces-bpf-fd", u->restrict_ifaces_egress_bpf_link);
return bpf_serialize_link(f, fds, "restrict-ifaces-bpf-fd", crt->restrict_ifaces_egress_bpf_link);
}
int bpf_restrict_ifaces_add_initial_link_fd(Unit *u, int fd) {
@@ -168,13 +185,17 @@ int bpf_restrict_ifaces_add_initial_link_fd(Unit *u, int fd) {
assert(u);
if (!u->initial_restric_ifaces_link_fds) {
u->initial_restric_ifaces_link_fds = fdset_new();
if (!u->initial_restric_ifaces_link_fds)
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (!crt)
return -EINVAL;
if (!crt->initial_restric_ifaces_link_fds) {
crt->initial_restric_ifaces_link_fds = fdset_new();
if (!crt->initial_restric_ifaces_link_fds)
return log_oom();
}
r = fdset_put(u->initial_restric_ifaces_link_fds, fd);
r = fdset_put(crt->initial_restric_ifaces_link_fds, fd);
if (r < 0)
return log_unit_error_errno(u, r,
"restrict-interfaces: Failed to put restrict-ifaces-bpf-fd %d to restored fdset: %m", fd);

View File

@@ -139,13 +139,18 @@ int bpf_socket_bind_add_initial_link_fd(Unit *u, int fd) {
assert(u);
if (!u->initial_socket_bind_link_fds) {
u->initial_socket_bind_link_fds = fdset_new();
if (!u->initial_socket_bind_link_fds)
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (!crt)
return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
"Failed to get control group runtime object.");
if (!crt->initial_socket_bind_link_fds) {
crt->initial_socket_bind_link_fds = fdset_new();
if (!crt->initial_socket_bind_link_fds)
return log_oom();
}
r = fdset_put(u->initial_socket_bind_link_fds, fd);
r = fdset_put(crt->initial_socket_bind_link_fds, fd);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-socket-bind: Failed to put BPF fd %d to initial fdset", fd);
@@ -158,6 +163,7 @@ static int socket_bind_install_impl(Unit *u) {
_cleanup_free_ char *cgroup_path = NULL;
_cleanup_close_ int cgroup_fd = -EBADF;
CGroupContext *cc;
CGroupRuntime *crt;
int r;
assert(u);
@@ -166,7 +172,11 @@ static int socket_bind_install_impl(Unit *u) {
if (!cc)
return 0;
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &cgroup_path);
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, crt->cgroup_path, NULL, &cgroup_path);
if (r < 0)
return log_unit_error_errno(u, r, "bpf-socket-bind: Failed to get cgroup path: %m");
@@ -193,35 +203,42 @@ static int socket_bind_install_impl(Unit *u) {
return log_unit_error_errno(u, r, "bpf-socket-bind: Failed to link '%s' cgroup-bpf program: %m",
sym_bpf_program__name(obj->progs.sd_bind6));
u->ipv4_socket_bind_link = TAKE_PTR(ipv4);
u->ipv6_socket_bind_link = TAKE_PTR(ipv6);
crt->ipv4_socket_bind_link = TAKE_PTR(ipv4);
crt->ipv6_socket_bind_link = TAKE_PTR(ipv6);
return 0;
}
int bpf_socket_bind_install(Unit *u) {
CGroupRuntime *crt;
int r;
assert(u);
r = socket_bind_install_impl(u);
if (r == -ENOMEM)
return r;
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
fdset_close(u->initial_socket_bind_link_fds);
r = socket_bind_install_impl(u);
fdset_close(crt->initial_socket_bind_link_fds);
return r;
}
int bpf_socket_bind_serialize(Unit *u, FILE *f, FDSet *fds) {
CGroupRuntime *crt;
int r;
assert(u);
r = bpf_serialize_link(f, fds, "ipv4-socket-bind-bpf-link", u->ipv4_socket_bind_link);
crt = unit_get_cgroup_runtime(u);
if (!crt)
return 0;
r = bpf_serialize_link(f, fds, "ipv4-socket-bind-bpf-link", crt->ipv4_socket_bind_link);
if (r < 0)
return r;
return bpf_serialize_link(f, fds, "ipv6-socket-bind-bpf-link", u->ipv6_socket_bind_link);
return bpf_serialize_link(f, fds, "ipv6-socket-bind-bpf-link", crt->ipv6_socket_bind_link);
}
#else /* ! BPF_FRAMEWORK */

File diff suppressed because it is too large Load Diff

View File

@@ -3,6 +3,9 @@
#include <stdbool.h>
#include "sd-event.h"
#include "bpf-program.h"
#include "bpf-restrict-fs.h"
#include "cgroup-util.h"
#include "cpu-set-util.h"
@@ -35,6 +38,7 @@ typedef struct CGroupBlockIODeviceWeight CGroupBlockIODeviceWeight;
typedef struct CGroupBlockIODeviceBandwidth CGroupBlockIODeviceBandwidth;
typedef struct CGroupBPFForeignProgram CGroupBPFForeignProgram;
typedef struct CGroupSocketBindItem CGroupSocketBindItem;
typedef struct CGroupRuntime CGroupRuntime;
typedef enum CGroupDevicePolicy {
/* When devices listed, will allow those, plus built-in ones, if none are listed will allow
@@ -131,7 +135,9 @@ typedef enum CGroupPressureWatch {
_CGROUP_PRESSURE_WATCH_INVALID = -EINVAL,
} CGroupPressureWatch;
/* When adding members make sure to update cgroup_context_copy() accordingly */
/* The user-supplied cgroup-related configuration options. This remains mostly immutable while the service
* manager is running (except for an occasional SetProperty() configuration change), outside of reload
* cycles. When adding members make sure to update cgroup_context_copy() accordingly. */
struct CGroupContext {
bool cpu_accounting;
bool io_accounting;
@@ -288,6 +294,86 @@ typedef enum CGroupLimitType {
_CGROUP_LIMIT_INVALID = -EINVAL,
} CGroupLimitType;
/* The dynamic, regularly updated information about a unit that has a realized cgroup. This is only allocated when a unit is first realized. */
typedef struct CGroupRuntime {
/* Where the cpu.stat or cpuacct.usage was at the time the unit was started */
nsec_t cpu_usage_base;
nsec_t cpu_usage_last; /* the most recently read value */
/* Most recently read value of memory accounting metrics */
uint64_t memory_accounting_last[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1];
/* The current counter of OOM kills initiated by systemd-oomd */
uint64_t managed_oom_kill_last;
/* The current counter of the oom_kill field in the memory.events cgroup attribute */
uint64_t oom_kill_last;
/* Where the io.stat data was at the time the unit was started */
uint64_t io_accounting_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
uint64_t io_accounting_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX]; /* the most recently read value */
/* Counterparts in the cgroup filesystem */
char *cgroup_path;
uint64_t cgroup_id;
CGroupMask cgroup_realized_mask; /* In which hierarchies does this unit's cgroup exist? (only relevant on cgroup v1) */
CGroupMask cgroup_enabled_mask; /* Which controllers are enabled (or more correctly: enabled for the children) for this unit's cgroup? (only relevant on cgroup v2) */
CGroupMask cgroup_invalidated_mask; /* A mask specifying controllers which shall be considered invalidated, and require re-realization */
CGroupMask cgroup_members_mask; /* A cache for the controllers required by all children of this cgroup (only relevant for slice units) */
/* Inotify watch descriptors for watching cgroup.events and memory.events on cgroupv2 */
int cgroup_control_inotify_wd;
int cgroup_memory_inotify_wd;
/* Device Controller BPF program */
BPFProgram *bpf_device_control_installed;
/* IP BPF Firewalling/accounting */
int ip_accounting_ingress_map_fd;
int ip_accounting_egress_map_fd;
uint64_t ip_accounting_extra[_CGROUP_IP_ACCOUNTING_METRIC_MAX];
int ipv4_allow_map_fd;
int ipv6_allow_map_fd;
int ipv4_deny_map_fd;
int ipv6_deny_map_fd;
BPFProgram *ip_bpf_ingress, *ip_bpf_ingress_installed;
BPFProgram *ip_bpf_egress, *ip_bpf_egress_installed;
Set *ip_bpf_custom_ingress;
Set *ip_bpf_custom_ingress_installed;
Set *ip_bpf_custom_egress;
Set *ip_bpf_custom_egress_installed;
/* BPF programs managed (e.g. loaded to kernel) by an entity external to systemd,
* attached to unit cgroup by provided program fd and attach type. */
Hashmap *bpf_foreign_by_key;
FDSet *initial_socket_bind_link_fds;
#if BPF_FRAMEWORK
/* BPF links to BPF programs attached to cgroup/bind{4|6} hooks and
* responsible for allowing or denying a unit to bind(2) to a socket
* address. */
struct bpf_link *ipv4_socket_bind_link;
struct bpf_link *ipv6_socket_bind_link;
#endif
FDSet *initial_restric_ifaces_link_fds;
#if BPF_FRAMEWORK
struct bpf_link *restrict_ifaces_ingress_bpf_link;
struct bpf_link *restrict_ifaces_egress_bpf_link;
#endif
bool cgroup_realized:1;
bool cgroup_members_mask_valid:1;
/* Reset cgroup accounting next time we fork something off */
bool reset_accounting:1;
/* Whether we warned about clamping the CPU quota period */
bool warned_clamping_cpu_quota_period:1;
} CGroupRuntime;
typedef struct Unit Unit;
typedef struct Manager Manager;
typedef enum ManagerState ManagerState;
@@ -360,6 +446,7 @@ int unit_watch_cgroup(Unit *u);
int unit_watch_cgroup_memory(Unit *u);
void unit_add_to_cgroup_realize_queue(Unit *u);
int unit_cgroup_is_empty(Unit *u);
void unit_release_cgroup(Unit *u);
/* Releases the cgroup only if it is recursively empty.
* Returns true if the cgroup was released, false otherwise. */
@@ -435,6 +522,16 @@ bool unit_cgroup_delegate(Unit *u);
int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name);
int unit_cgroup_freezer_action(Unit *u, FreezerAction action);
const char* freezer_action_to_string(FreezerAction a) _const_;
FreezerAction freezer_action_from_string(const char *s) _pure_;
CGroupRuntime *cgroup_runtime_new(void);
CGroupRuntime *cgroup_runtime_free(CGroupRuntime *crt);
DEFINE_TRIVIAL_CLEANUP_FUNC(CGroupRuntime*, cgroup_runtime_free);
int cgroup_runtime_serialize(Unit *u, FILE *f, FDSet *fds);
int cgroup_runtime_deserialize_one(Unit *u, const char *key, const char *value, FDSet *fds);
const char* cgroup_pressure_watch_to_string(CGroupPressureWatch a) _const_;
CGroupPressureWatch cgroup_pressure_watch_from_string(const char *s) _pure_;

View File

@@ -69,6 +69,10 @@ static int build_managed_oom_json_array_element(Unit *u, const char *property, J
if (!c)
return -EINVAL;
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (!crt)
return -EINVAL;
if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
/* systemd-oomd should always treat inactive units as though they didn't enable any action since they
* should not have a valid cgroup */
@@ -83,19 +87,24 @@ static int build_managed_oom_json_array_element(Unit *u, const char *property, J
return json_build(ret_v, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("mode", JSON_BUILD_STRING(mode)),
JSON_BUILD_PAIR("path", JSON_BUILD_STRING(u->cgroup_path)),
JSON_BUILD_PAIR("path", JSON_BUILD_STRING(crt->cgroup_path)),
JSON_BUILD_PAIR("property", JSON_BUILD_STRING(property)),
JSON_BUILD_PAIR_CONDITION(use_limit, "limit", JSON_BUILD_UNSIGNED(c->moom_mem_pressure_limit))));
}
int manager_varlink_send_managed_oom_update(Unit *u) {
_cleanup_(json_variant_unrefp) JsonVariant *arr = NULL, *v = NULL;
CGroupRuntime *crt;
CGroupContext *c;
int r;
assert(u);
if (!UNIT_VTABLE(u)->can_set_managed_oom || !u->manager || !u->cgroup_path)
if (!UNIT_VTABLE(u)->can_set_managed_oom || !u->manager)
return 0;
crt = unit_get_cgroup_runtime(u);
if (!crt || !crt->cgroup_path)
return 0;
if (MANAGER_IS_SYSTEM(u->manager)) {

View File

@@ -1300,7 +1300,9 @@ int bus_cgroup_set_property(
if (!UNIT_WRITE_FLAGS_NOOP(flags)) {
c->cpu_quota_per_sec_usec = u64;
u->warned_clamping_cpu_quota_period = false;
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (crt)
crt->warned_clamping_cpu_quota_period = false;
unit_invalidate_cgroup(u, CGROUP_MASK_CPU);
if (c->cpu_quota_per_sec_usec == USEC_INFINITY)
@@ -1324,7 +1326,9 @@ int bus_cgroup_set_property(
if (!UNIT_WRITE_FLAGS_NOOP(flags)) {
c->cpu_quota_period_usec = u64;
u->warned_clamping_cpu_quota_period = false;
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (crt)
crt->warned_clamping_cpu_quota_period = false;
unit_invalidate_cgroup(u, CGROUP_MASK_CPU);
if (c->cpu_quota_period_usec == USEC_INFINITY)
unit_write_setting(u, flags, "CPUQuotaPeriodSec", "CPUQuotaPeriodSec=");

View File

@@ -1216,12 +1216,32 @@ static int property_get_cgroup(
* indicates the root cgroup, which we report as "/". c) all
* other cases we report as-is. */
if (u->cgroup_path)
t = empty_to_root(u->cgroup_path);
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (crt && crt->cgroup_path)
t = empty_to_root(crt->cgroup_path);
return sd_bus_message_append(reply, "s", t);
}
/* sd-bus property getter for "ControlGroupId": reports the unit's numeric
 * cgroup ID as a "t" (uint64). If no CGroupRuntime has been allocated for the
 * unit (i.e. the cgroup was never realized), we report 0. */
static int property_get_cgroup_id(
                sd_bus *bus,
                const char *path,
                const char *interface,
                const char *property,
                sd_bus_message *reply,
                void *userdata,
                sd_bus_error *error) {

        Unit *u = ASSERT_PTR(userdata);
        CGroupRuntime *crt;
        uint64_t id = 0;

        assert(bus);
        assert(reply);

        /* Runtime state is allocated lazily; absence simply means "no cgroup yet". */
        crt = unit_get_cgroup_runtime(u);
        if (crt)
                id = crt->cgroup_id;

        return sd_bus_message_append(reply, "t", id);
}
static int append_process(sd_bus_message *reply, const char *p, PidRef *pid, Set *pids) {
_cleanup_free_ char *buf = NULL, *cmdline = NULL;
int r;
@@ -1350,8 +1370,10 @@ int bus_unit_method_get_processes(sd_bus_message *message, void *userdata, sd_bu
if (r < 0)
return r;
if (u->cgroup_path) {
r = append_cgroup(reply, u->cgroup_path, pids);
CGroupRuntime *crt;
crt = unit_get_cgroup_runtime(u);
if (crt && crt->cgroup_path) {
r = append_cgroup(reply, crt->cgroup_path, pids);
if (r < 0)
return r;
}
@@ -1558,7 +1580,7 @@ const sd_bus_vtable bus_unit_cgroup_vtable[] = {
SD_BUS_VTABLE_START(0),
SD_BUS_PROPERTY("Slice", "s", property_get_slice, 0, 0),
SD_BUS_PROPERTY("ControlGroup", "s", property_get_cgroup, 0, 0),
SD_BUS_PROPERTY("ControlGroupId", "t", NULL, offsetof(Unit, cgroup_id), 0),
SD_BUS_PROPERTY("ControlGroupId", "t", property_get_cgroup_id, 0, 0),
SD_BUS_PROPERTY("MemoryCurrent", "t", property_get_current_memory, 0, 0),
SD_BUS_PROPERTY("MemoryPeak", "t", property_get_memory_accounting, 0, 0),
SD_BUS_PROPERTY("MemorySwapCurrent", "t", property_get_memory_accounting, 0, 0),

View File

@@ -240,6 +240,7 @@ static void mount_done(Unit *u) {
mount_parameters_done(&m->parameters_fragment);
m->exec_runtime = exec_runtime_free(m->exec_runtime);
exec_command_done_array(m->exec_command, _MOUNT_EXEC_COMMAND_MAX);
m->control_command = NULL;
@@ -815,8 +816,10 @@ static int mount_coldplug(Unit *u) {
return r;
}
if (!IN_SET(m->deserialized_state, MOUNT_DEAD, MOUNT_FAILED))
if (!IN_SET(m->deserialized_state, MOUNT_DEAD, MOUNT_FAILED)) {
(void) unit_setup_exec_runtime(u);
(void) unit_setup_cgroup_runtime(u);
}
mount_set_state(m, m->deserialized_state);
return 0;
@@ -1332,7 +1335,9 @@ static void mount_cycle_clear(Mount *m) {
m->result = MOUNT_SUCCESS;
m->reload_result = MOUNT_SUCCESS;
exec_command_reset_status_array(m->exec_command, _MOUNT_EXEC_COMMAND_MAX);
UNIT(m)->reset_accounting = true;
if (m->cgroup_runtime)
m->cgroup_runtime->reset_accounting = true;
}
static int mount_start(Unit *u) {
@@ -2448,6 +2453,7 @@ const UnitVTable mount_vtable = {
.cgroup_context_offset = offsetof(Mount, cgroup_context),
.kill_context_offset = offsetof(Mount, kill_context),
.exec_runtime_offset = offsetof(Mount, exec_runtime),
.cgroup_runtime_offset = offsetof(Mount, cgroup_runtime),
.sections =
"Unit\0"

View File

@@ -79,6 +79,7 @@ struct Mount {
CGroupContext cgroup_context;
ExecRuntime *exec_runtime;
CGroupRuntime *cgroup_runtime;
MountState state, deserialized_state;

View File

@@ -353,6 +353,9 @@ static int scope_enter_start_chown(Scope *s) {
assert(s);
assert(s->user);
if (!s->cgroup_runtime)
return -EINVAL;
r = scope_arm_timer(s, /* relative= */ true, u->manager->defaults.timeout_start_usec);
if (r < 0)
return r;
@@ -385,7 +388,7 @@ static int scope_enter_start_chown(Scope *s) {
}
}
r = cg_set_access(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, uid, gid);
r = cg_set_access(SYSTEMD_CGROUP_CONTROLLER, s->cgroup_runtime->cgroup_path, uid, gid);
if (r < 0) {
log_unit_error_errno(UNIT(s), r, "Failed to adjust control group access: %m");
_exit(EXIT_CGROUP);
@@ -776,6 +779,7 @@ const UnitVTable scope_vtable = {
.object_size = sizeof(Scope),
.cgroup_context_offset = offsetof(Scope, cgroup_context),
.kill_context_offset = offsetof(Scope, kill_context),
.cgroup_runtime_offset = offsetof(Scope, cgroup_runtime),
.sections =
"Unit\0"

View File

@@ -21,6 +21,7 @@ struct Scope {
CGroupContext cgroup_context;
KillContext kill_context;
CGroupRuntime *cgroup_runtime;
ScopeState state, deserialized_state;
ScopeResult result;

View File

@@ -460,6 +460,7 @@ static void service_done(Unit *u) {
s->status_text = mfree(s->status_text);
s->exec_runtime = exec_runtime_free(s->exec_runtime);
exec_command_free_array(s->exec_command, _SERVICE_EXEC_COMMAND_MAX);
s->control_command = NULL;
s->main_command = NULL;
@@ -1345,6 +1346,7 @@ static int service_coldplug(Unit *u) {
SERVICE_DEAD_RESOURCES_PINNED)) {
(void) unit_enqueue_rewatch_pids(u);
(void) unit_setup_exec_runtime(u);
(void) unit_setup_cgroup_runtime(u);
}
if (IN_SET(s->deserialized_state, SERVICE_START_POST, SERVICE_RUNNING, SERVICE_RELOAD, SERVICE_RELOAD_SIGNAL, SERVICE_RELOAD_NOTIFY))
@@ -1885,10 +1887,10 @@ static int cgroup_good(Service *s) {
/* Returns 0 if the cgroup is empty or doesn't exist, > 0 if it is exists and is populated, < 0 if we can't
* figure it out */
if (!UNIT(s)->cgroup_path)
if (!s->cgroup_runtime || !s->cgroup_runtime->cgroup_path)
return 0;
r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, UNIT(s)->cgroup_path);
r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, s->cgroup_runtime->cgroup_path);
if (r < 0)
return r;
@@ -2773,7 +2775,9 @@ static int service_start(Unit *u) {
s->flush_n_restarts = false;
}
u->reset_accounting = true;
CGroupRuntime *crt = unit_get_cgroup_runtime(u);
if (crt)
crt->reset_accounting = true;
service_enter_condition(s);
return 1;
@@ -5149,6 +5153,7 @@ const UnitVTable service_vtable = {
.cgroup_context_offset = offsetof(Service, cgroup_context),
.kill_context_offset = offsetof(Service, kill_context),
.exec_runtime_offset = offsetof(Service, exec_runtime),
.cgroup_runtime_offset = offsetof(Service, cgroup_runtime),
.sections =
"Unit\0"

View File

@@ -168,6 +168,8 @@ struct Service {
/* Runtime data of the execution context */
ExecRuntime *exec_runtime;
CGroupRuntime *cgroup_runtime;
PidRef main_pid, control_pid;
/* if we are a socket activated service instance, store information of the connection/peer/socket */

View File

@@ -399,6 +399,7 @@ static int slice_freezer_action(Unit *s, FreezerAction action) {
const UnitVTable slice_vtable = {
.object_size = sizeof(Slice),
.cgroup_context_offset = offsetof(Slice, cgroup_context),
.cgroup_runtime_offset = offsetof(Slice, cgroup_runtime),
.sections =
"Unit\0"

View File

@@ -11,6 +11,8 @@ struct Slice {
SliceState state, deserialized_state;
CGroupContext cgroup_context;
CGroupRuntime *cgroup_runtime;
};
extern const UnitVTable slice_vtable;

View File

@@ -167,6 +167,7 @@ static void socket_done(Unit *u) {
s->peers_by_address = set_free(s->peers_by_address);
s->exec_runtime = exec_runtime_free(s->exec_runtime);
exec_command_free_array(s->exec_command, _SOCKET_EXEC_COMMAND_MAX);
s->control_command = NULL;
@@ -2473,7 +2474,8 @@ static int socket_start(Unit *u) {
s->result = SOCKET_SUCCESS;
exec_command_reset_status_list_array(s->exec_command, _SOCKET_EXEC_COMMAND_MAX);
u->reset_accounting = true;
if (s->cgroup_runtime)
s->cgroup_runtime->reset_accounting = true;
socket_enter_start_pre(s);
return 1;
@@ -3528,6 +3530,7 @@ const UnitVTable socket_vtable = {
.cgroup_context_offset = offsetof(Socket, cgroup_context),
.kill_context_offset = offsetof(Socket, kill_context),
.exec_runtime_offset = offsetof(Socket, exec_runtime),
.cgroup_runtime_offset = offsetof(Socket, cgroup_runtime),
.sections =
"Unit\0"

Some files were not shown because too many files have changed in this diff Show More