netfilter: xt_qtaguid: 1st pass at tracking tag based data resources

* Added global resource tracking based on tags.
 - Can be put into passive mode via
    /sys/modules/xt_qtaguid/params/tag_tracking_passive
 - The number of socket tags per UID is now limited
 - Adding /dev/xt_qtaguid that each process should open before starting
to tag sockets. A later change will make it a "must".
 - A process should not create new tags unless it has the dev open.
  A later change will make it a must.
 - On qtaguid_resources release, the process' matching socket tag info
  is deleted.
* Support run-time debug mask via /sys/modules parameter "debug_mask".
* Split the module into pretty-printing code, includes, and main.
* Removed ptrdiff_t usage which didn't work in all cases.

Change-Id: I4a21d3bea55d23c1c3747253904e2a79f7d555d9
Signed-off-by: JP Abgrall <jpa@google.com>
This commit is contained in:
JP Abgrall
2011-09-09 01:55:24 -07:00
parent d18e4b80d1
commit f7d29b6435
5 changed files with 1534 additions and 298 deletions

View File

@@ -95,7 +95,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,305 @@
/*
* Kernel iptables module to track stats for packets based on user tags.
*
* (C) 2011 Google, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __XT_QTAGUID_INTERNAL_H__
#define __XT_QTAGUID_INTERNAL_H__
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>
/* Define/comment out these *DEBUG to compile in/out the pr_debug calls. */
/* Iface handling */
#define IDEBUG
/* Iptable Matching. Per packet. */
#define MDEBUG
/* Red-black tree handling. Per packet. */
#define RDEBUG
/* procfs ctrl/stats handling */
#define CDEBUG
/* dev and resource tracking */
#define DDEBUG

/*
 * Even when a *DEBUG class is compiled in, its messages are only emitted
 * when the matching *DEBUG_MASK bit is set in the run-time debug_mask.
 * E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK)
 */
#define DEFAULT_DEBUG_MASK 0
#define IDEBUG_MASK (1<<0)
#define MDEBUG_MASK (1<<1)
#define RDEBUG_MASK (1<<2)
#define CDEBUG_MASK (1<<3)
#define DDEBUG_MASK (1<<4)

/* Emit pr_debug() only when the given bit(s) are set in debug_mask. */
#define MSK_DEBUG(mask, ...) do { \
	if (unlikely(debug_mask & (mask))) \
		pr_debug(__VA_ARGS__); \
	} while (0)
#ifdef IDEBUG
#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
#else
#define IF_DEBUG(...) no_printk(__VA_ARGS__)
#endif
#ifdef MDEBUG
#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
#else
#define MT_DEBUG(...) no_printk(__VA_ARGS__)
#endif
#ifdef RDEBUG
#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
#else
#define RB_DEBUG(...) no_printk(__VA_ARGS__)
#endif
#ifdef CDEBUG
#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
#else
#define CT_DEBUG(...) no_printk(__VA_ARGS__)
#endif
#ifdef DDEBUG
#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
#else
#define DR_DEBUG(...) no_printk(__VA_ARGS__)
#endif

/* Run-time mask; settable via the "debug_mask" module parameter. */
extern uint debug_mask;
/*---------------------------------------------------------------------------*/
/*
 * Tags:
 *
 * A tag is the key against which data usage counters are tracked.
 * Layout of the 64 bits (only ever manipulated via the accessors below):
 *   - low 32 bits:  uid_tag  -- the owning UID; always present, and the
 *                               basis for policing (it can not be ignored).
 *   - high 32 bits: acct_tag -- an optional user-space chosen "accounting
 *                               tag" used to subdivide a UID's stats.
 *
 * E.g. for uid 10003 one may see
 *   a: {acct_tag=1, uid_tag=10003}
 *   b: {acct_tag=2, uid_tag=10003}
 *   c: {acct_tag=3, uid_tag=10003}
 *   d: {acct_tag=0, uid_tag=10003}
 * where a/b/c count specific sockets' traffic and d holds the UID totals
 * (including untagged traffic); d is what policing/quota rules use.
 *
 * tag_t must be wide enough to hold a uid_t plus the acct_tag; it might
 * become a struct if needed. Nothing should be using it as an int.
 */
typedef uint64_t tag_t;  /* Only used via accessors */

#define TAG_UID_MASK 0xFFFFFFFFULL
#define TAG_ACCT_MASK (~0xFFFFFFFFULL)

/* Total order on tags: returns -1/0/1 like a comparator for rb-tree use. */
static inline int tag_compare(tag_t t1, tag_t t2)
{
	if (t1 < t2)
		return -1;
	return (t1 == t2) ? 0 : 1;
}

/* Merge a pure acct_tag (uid bits clear) with a uid into one tag. */
static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
{
	return acct_tag | (tag_t)uid;
}

/* A tag with no accounting component: just the uid in the low bits. */
static inline tag_t make_tag_from_uid(uid_t uid)
{
	return (tag_t)uid;
}

/* Extract the owning UID (low 32 bits). */
static inline uid_t get_uid_from_tag(tag_t tag)
{
	return (uid_t)(tag & TAG_UID_MASK);
}

/* The uid portion of the tag, kept as a tag_t. */
static inline tag_t get_utag_from_tag(tag_t tag)
{
	return tag & TAG_UID_MASK;
}

/* The accounting portion of the tag (high 32 bits), uid bits cleared. */
static inline tag_t get_atag_from_tag(tag_t tag)
{
	return tag & TAG_ACCT_MASK;
}

/* A valid stand-alone acct_tag has no uid bits set. */
static inline bool valid_atag(tag_t tag)
{
	return (tag & TAG_UID_MASK) == 0;
}

/* Build a pure acct_tag from a user-supplied 32-bit value. */
static inline tag_t make_atag_from_value(uint32_t value)
{
	return ((tag_t)value) << 32;
}
/*---------------------------------------------------------------------------*/
/*
 * Maximum number of socket tags that a UID is allowed to have active.
 * Multiple processes belonging to the same UID contribute towards this limit.
 * Special UIDs that can impersonate a UID also contribute (e.g. download
 * manager, ...)
 */
#define DEFAULT_MAX_SOCK_TAGS 1024

/*
 * For now we only track 2 sets of counters.
 * The default set is 0.
 * Userspace can activate another set for a given uid being tracked.
 */
#define IFS_MAX_COUNTER_SETS 2

/* Direction index into the counter array. */
enum ifs_tx_rx {
	IFS_TX,
	IFS_RX,
	IFS_MAX_DIRECTIONS
};

/* For now, TCP, UDP, the rest */
enum ifs_proto {
	IFS_TCP,
	IFS_UDP,
	IFS_PROTO_OTHER,
	IFS_MAX_PROTOS
};

/* One byte + packet pair of counters. */
struct byte_packet_counters {
	uint64_t bytes;
	uint64_t packets;
};

/* Full counter matrix: [counter set][direction][protocol]. */
struct data_counters {
	struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
};

/* Generic X based nodes used as a base for rb_tree ops */
struct tag_node {
	struct rb_node node;
	tag_t tag;
};

/* Per-tag stats, embedded in an iface_stat's tag_stat_tree. */
struct tag_stat {
	struct tag_node tn;
	struct data_counters counters;
	/*
	 * If this tag is acct_tag based, we need to count against the
	 * matching parent uid_tag.
	 */
	struct data_counters *parent_counters;
};

/* Per-network-interface totals plus the tree of per-tag stats. */
struct iface_stat {
	struct list_head list;   /* in iface_stat_list */
	char *ifname;
	uint64_t rx_bytes;
	uint64_t rx_packets;
	uint64_t tx_bytes;
	uint64_t tx_packets;
	/* NOTE(review): presumably tracks whether the iface is currently up;
	 * confirm against the main module's NETDEV_* event handling. */
	bool active;
	/* procfs directory entry for this interface's stats. */
	struct proc_dir_entry *proc_ptr;
	struct rb_root tag_stat_tree;
	/* Protects tag_stat_tree (taken with spin_lock_bh by the printers). */
	spinlock_t tag_stat_list_lock;
};

/* This is needed to create proc_dir_entries from atomic context. */
struct iface_stat_work {
	struct work_struct iface_work;
	struct iface_stat *iface_entry;
};
/*
 * Track tag that this socket is transferring data for, and not necessarily
 * the uid that owns the socket.
 * This is the tag against which tag_stat.counters will be billed.
 * These structs need to be looked up by sock and pid.
 */
struct sock_tag {
	struct rb_node sock_node;
	struct sock *sk;  /* Only used as a number, never dereferenced */
	/* The socket is needed for sockfd_put() */
	struct socket *socket;
	/* Used to associate with a given pid */
	struct list_head list;   /* in proc_qtu_data.sock_tag_list */
	pid_t pid;
	tag_t tag;
};

/* Module-wide event counters, exported for diagnostics. */
struct qtaguid_event_counts {
	/* Various successful events */
	atomic64_t sockets_tagged;
	atomic64_t sockets_untagged;
	atomic64_t counter_set_changes;
	atomic64_t delete_cmds;
	atomic64_t iface_events;  /* Number of NETDEV_* events handled */
	/*
	 * match_found_sk_*: numbers related to the netfilter matching
	 * function finding a sock for the sk_buff.
	 */
	atomic64_t match_found_sk;  /* An sk was already in the sk_buff. */
	/* The connection tracker had the sk. */
	atomic64_t match_found_sk_in_ct;
	/*
	 * No sk could be found. No apparent owner. Could happen with
	 * unsolicited traffic.
	 */
	atomic64_t match_found_sk_none;
};

/* Track the set active_set for the given tag. */
struct tag_counter_set {
	struct tag_node tn;
	int active_set;
};

/*----------------------------------------------*/
/*
 * The qtu uid data is used to track resources that are created directly or
 * indirectly by processes (uid tracked).
 * It is shared by the processes with the same uid.
 * Some of the resource will be counted to prevent further rogue allocations,
 * some will need freeing once the owner process (uid) exits.
 */
struct uid_tag_data {
	struct rb_node node;
	uid_t uid;
	/*
	 * For the uid, how many accounting tags have been set.
	 */
	int num_active_tags;
	struct rb_root tag_ref_tree;
	/* No tag_node_tree_lock; use uid_tag_data_tree_lock */
};

/* Reference count for one (uid, acct_tag) combination. */
struct tag_ref {
	struct tag_node tn;
	/*
	 * This tracks the number of active sockets that have a tag on them
	 * which matches this tag_ref.tn.tag.
	 * A tag ref can live on after the sockets are untagged.
	 * A tag ref can only be removed during a tag delete command.
	 */
	int num_sock_tags;
};

/* Per-process state, created when the process opens /dev/xt_qtaguid. */
struct proc_qtu_data {
	struct rb_node node;
	pid_t pid;
	struct uid_tag_data *parent_tag_data;
	/* Tracks the sock_tags that need freeing upon this proc's death */
	struct list_head sock_tag_list;
	/* No spinlock_t sock_tag_list_lock; use the global one. */
};

/*----------------------------------------------*/
#endif  /* ifndef __XT_QTAGUID_INTERNAL_H__ */

View File

@@ -0,0 +1,397 @@
/*
* Pretty printing Support for iptables xt_qtaguid module.
*
* (C) 2011 Google, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* There are run-time debug flags enabled via the debug_mask module param, or
* via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
*/
#define DEBUG
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/net.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include "xt_qtaguid_internal.h"
#include "xt_qtaguid_print.h"
/*
 * Format a tag as a kmalloc'ed string (GFP_ATOMIC); caller must kfree().
 * NULL input yields "tag_t@null{}".
 */
char *pp_tag_t(tag_t *tag)
{
	if (tag)
		return kasprintf(GFP_ATOMIC,
				 "tag_t@%p{tag=0x%llx, uid=%u}",
				 tag, *tag, get_uid_from_tag(*tag));
	return kasprintf(GFP_ATOMIC, "tag_t@null{}");
}
/*
 * Format a data_counters matrix as a kmalloc'ed string (GFP_ATOMIC);
 * caller must kfree().  When showValues is false only the address is
 * printed, which avoids the large dump for e.g. parent counters.
 */
char *pp_data_counters(struct data_counters *dc, bool showValues)
{
	if (!dc)
		return kasprintf(GFP_ATOMIC, "data_counters@null{}");
	if (showValues)
		/* Spell out both counter sets, each direction, each proto. */
		return kasprintf(
			GFP_ATOMIC, "data_counters@%p{"
			"set0{"
			"rx{"
			"tcp{b=%llu, p=%llu}, "
			"udp{b=%llu, p=%llu},"
			"other{b=%llu, p=%llu}}, "
			"tx{"
			"tcp{b=%llu, p=%llu}, "
			"udp{b=%llu, p=%llu},"
			"other{b=%llu, p=%llu}}}, "
			"set1{"
			"rx{"
			"tcp{b=%llu, p=%llu}, "
			"udp{b=%llu, p=%llu},"
			"other{b=%llu, p=%llu}}, "
			"tx{"
			"tcp{b=%llu, p=%llu}, "
			"udp{b=%llu, p=%llu},"
			"other{b=%llu, p=%llu}}}}",
			dc,
			dc->bpc[0][IFS_RX][IFS_TCP].bytes,
			dc->bpc[0][IFS_RX][IFS_TCP].packets,
			dc->bpc[0][IFS_RX][IFS_UDP].bytes,
			dc->bpc[0][IFS_RX][IFS_UDP].packets,
			dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
			dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
			dc->bpc[0][IFS_TX][IFS_TCP].bytes,
			dc->bpc[0][IFS_TX][IFS_TCP].packets,
			dc->bpc[0][IFS_TX][IFS_UDP].bytes,
			dc->bpc[0][IFS_TX][IFS_UDP].packets,
			dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
			dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
			dc->bpc[1][IFS_RX][IFS_TCP].bytes,
			dc->bpc[1][IFS_RX][IFS_TCP].packets,
			dc->bpc[1][IFS_RX][IFS_UDP].bytes,
			dc->bpc[1][IFS_RX][IFS_UDP].packets,
			dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
			dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
			dc->bpc[1][IFS_TX][IFS_TCP].bytes,
			dc->bpc[1][IFS_TX][IFS_TCP].packets,
			dc->bpc[1][IFS_TX][IFS_UDP].bytes,
			dc->bpc[1][IFS_TX][IFS_UDP].packets,
			dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
			dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
	else
		return kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
}
/*
 * Format a tag_node as a kmalloc'ed string (GFP_ATOMIC); caller must
 * kfree().  NULL input yields "tag_node@null{}".
 */
char *pp_tag_node(struct tag_node *tn)
{
	char *res;
	char *tag_str;

	if (!tn)
		return kasprintf(GFP_ATOMIC, "tag_node@null{}");

	tag_str = pp_tag_t(&tn->tag);
	res = kasprintf(GFP_ATOMIC, "tag_node@%p{tag=%s}", tn, tag_str);
	kfree(tag_str);
	return res;
}
/*
 * Format a tag_ref as a kmalloc'ed string (GFP_ATOMIC); caller must
 * kfree().  NULL input yields "tag_ref@null{}".
 */
char *pp_tag_ref(struct tag_ref *tr)
{
	char *res;
	char *node_str;

	if (!tr)
		return kasprintf(GFP_ATOMIC, "tag_ref@null{}");

	node_str = pp_tag_node(&tr->tn);
	res = kasprintf(GFP_ATOMIC, "tag_ref@%p{%s, num_sock_tags=%d}",
			tr, node_str, tr->num_sock_tags);
	kfree(node_str);
	return res;
}
/*
 * Format a tag_stat, including its own counters (with values) and its
 * parent counters (address only), as a kmalloc'ed string (GFP_ATOMIC);
 * caller must kfree().
 */
char *pp_tag_stat(struct tag_stat *ts)
{
	char *res;
	char *node_str;
	char *own_ctrs_str;
	char *parent_ctrs_str;

	if (!ts)
		return kasprintf(GFP_ATOMIC, "tag_stat@null{}");

	node_str = pp_tag_node(&ts->tn);
	own_ctrs_str = pp_data_counters(&ts->counters, true);
	parent_ctrs_str = pp_data_counters(ts->parent_counters, false);
	res = kasprintf(GFP_ATOMIC,
			"tag_stat@%p{%s, counters=%s, parent_counters=%s}",
			ts, node_str, own_ctrs_str, parent_ctrs_str);
	kfree(node_str);
	kfree(own_ctrs_str);
	kfree(parent_ctrs_str);
	return res;
}
/*
 * Format an iface_stat (totals only; the tag_stat_tree contents are
 * elided) as a kmalloc'ed string (GFP_ATOMIC); caller must kfree().
 */
char *pp_iface_stat(struct iface_stat *is)
{
	if (!is)
		return kasprintf(GFP_ATOMIC, "iface_stat@null{}");
	return kasprintf(GFP_ATOMIC, "iface_stat@%p{"
			 "list=list_head{...}, "
			 "ifname=%s, "
			 "rx_bytes=%llu, "
			 "rx_packets=%llu, "
			 "tx_bytes=%llu, "
			 "tx_packets=%llu, "
			 "active=%d, "
			 "proc_ptr=%p, "
			 "tag_stat_tree=rb_root{...}}",
			 is,
			 is->ifname,
			 is->rx_bytes,
			 is->rx_packets,
			 is->tx_bytes,
			 is->tx_packets,
			 is->active,
			 is->proc_ptr);
}
/*
 * Format a sock_tag as a kmalloc'ed string (GFP_ATOMIC); caller must
 * kfree().  NULL input yields "sock_tag@null{}".
 *
 * Fix: the original unconditionally read st->socket->file->f_count.
 * A debug printer must not oops the kernel if it is handed a sock_tag
 * whose socket (or its file) reference is gone, so guard the chain and
 * report f_count=0 in that case.
 */
char *pp_sock_tag(struct sock_tag *st)
{
	char *tag_str;
	char *res;
	long f_count = 0;

	if (!st)
		return kasprintf(GFP_ATOMIC, "sock_tag@null{}");
	/* Only dereference socket/file when both are present. */
	if (st->socket && st->socket->file)
		f_count = atomic_long_read(&st->socket->file->f_count);
	tag_str = pp_tag_t(&st->tag);
	res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
			"sock_node=rb_node{...}, "
			"sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
			"pid=%u, tag=%s}",
			st, st->sk, st->socket, f_count,
			st->pid, tag_str);
	kfree(tag_str);
	return res;
}
/*
 * Format a uid_tag_data as a kmalloc'ed string (GFP_ATOMIC); caller
 * must kfree().  NULL input yields "uid_tag_data@null{}".
 *
 * Fix: the original format string claimed fields "tag_node_tree" and
 * "proc_qtu_data_tree" which struct uid_tag_data does not have (it has
 * tag_ref_tree; proc_qtu_data lives in its own global tree).  Report
 * the struct's actual members so the debug output is not misleading.
 */
char *pp_uid_tag_data(struct uid_tag_data *utd)
{
	char *res;

	if (!utd)
		return kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
	res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
			"uid=%u, num_active_acct_tags=%d, "
			"tag_ref_tree=rb_root{...}}",
			utd, utd->uid,
			utd->num_active_tags);
	return res;
}
/*
 * Format a proc_qtu_data (pid plus its parent uid_tag_data) as a
 * kmalloc'ed string (GFP_ATOMIC); caller must kfree().
 */
char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
{
	char *res;
	char *utd_str;

	if (!pqd)
		return kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");

	utd_str = pp_uid_tag_data(pqd->parent_tag_data);
	res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
			"node=rb_node{...}, pid=%u, "
			"parent_tag_data=%s, "
			"sock_tag_list=list_head{...}}",
			pqd, pqd->pid, utd_str);
	kfree(utd_str);
	return res;
}
/*------------------------------------------*/
void prdebug_sock_tag_tree(int indent_level,
struct rb_root *sock_tag_tree)
{
struct rb_node *node;
struct sock_tag *sock_tag_entry;
char *str;
str = "sock_tag_tree=rb_root{";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
indent_level++;
for (node = rb_first(sock_tag_tree);
node;
node = rb_next(node)) {
sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
str = pp_sock_tag(sock_tag_entry);
CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, str);
kfree(str);
}
indent_level--;
str = "}";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
}
/* Dump every sock_tag on the list via CT_DEBUG, one line per entry. */
void prdebug_sock_tag_list(int indent_level,
			   struct list_head *sock_tag_list)
{
	struct sock_tag *entry;

	CT_DEBUG("%*d: %s\n", indent_level*2, indent_level,
		 "sock_tag_list=list_head{");
	indent_level++;
	list_for_each_entry(entry, sock_tag_list, list) {
		char *s = pp_sock_tag(entry);

		CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, s);
		kfree(s);
	}
	indent_level--;
	CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, "}");
}
void prdebug_proc_qtu_data_tree(int indent_level,
struct rb_root *proc_qtu_data_tree)
{
char *str;
struct rb_node *node;
struct proc_qtu_data *proc_qtu_data_entry;
str = "proc_qtu_data_tree=rb_root{";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
indent_level++;
for (node = rb_first(proc_qtu_data_tree);
node;
node = rb_next(node)) {
proc_qtu_data_entry = rb_entry(node,
struct proc_qtu_data,
node);
str = pp_proc_qtu_data(proc_qtu_data_entry);
CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level,
str);
kfree(str);
indent_level++;
prdebug_sock_tag_list(indent_level,
&proc_qtu_data_entry->sock_tag_list);
indent_level--;
}
indent_level--;
str = "}";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
}
void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
{
char *str;
struct rb_node *node;
struct tag_ref *tag_ref_entry;
str = "tag_ref_tree{";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
indent_level++;
for (node = rb_first(tag_ref_tree);
node;
node = rb_next(node)) {
tag_ref_entry = rb_entry(node,
struct tag_ref,
tn.node);
str = pp_tag_ref(tag_ref_entry);
CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level,
str);
kfree(str);
}
indent_level--;
str = "}";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
}
void prdebug_uid_tag_data_tree(int indent_level,
struct rb_root *uid_tag_data_tree)
{
char *str;
struct rb_node *node;
struct uid_tag_data *uid_tag_data_entry;
str = "uid_tag_data_tree=rb_root{";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
indent_level++;
for (node = rb_first(uid_tag_data_tree);
node;
node = rb_next(node)) {
uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
node);
str = pp_uid_tag_data(uid_tag_data_entry);
CT_DEBUG("%*d: %s,\n", indent_level*2, indent_level, str);
kfree(str);
if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
indent_level++;
prdebug_tag_ref_tree(indent_level,
&uid_tag_data_entry->tag_ref_tree);
indent_level--;
}
}
indent_level--;
str = "}";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
}
void prdebug_tag_stat_tree(int indent_level,
struct rb_root *tag_stat_tree)
{
char *str;
struct rb_node *node;
struct tag_stat *ts_entry;
str = "tag_stat_tree{";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
indent_level++;
for (node = rb_first(tag_stat_tree);
node;
node = rb_next(node)) {
ts_entry = rb_entry(node, struct tag_stat, tn.node);
str = pp_tag_stat(ts_entry);
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level,
str);
kfree(str);
}
indent_level--;
str = "}";
CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
}
/*
 * Dump every iface_stat on the list via CT_DEBUG and, while holding the
 * per-iface tag_stat_list_lock, the per-tag stats hanging off each one.
 * NOTE(review): the _bh lock suggests the tree is also touched from
 * softirq (packet-matching) context -- confirm against the main module.
 */
void prdebug_iface_stat_list(int indent_level,
			     struct list_head *iface_stat_list)
{
	char *str;
	struct iface_stat *iface_entry;

	str = "iface_stat_list=list_head{";
	CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
	indent_level++;
	list_for_each_entry(iface_entry, iface_stat_list, list) {
		str = pp_iface_stat(iface_entry);
		CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, str);
		kfree(str);
		/* Hold the lock across the nested tag_stat_tree walk. */
		spin_lock_bh(&iface_entry->tag_stat_list_lock);
		if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
			indent_level++;
			prdebug_tag_stat_tree(indent_level,
					      &iface_entry->tag_stat_tree);
			indent_level--;
		}
		spin_unlock_bh(&iface_entry->tag_stat_list_lock);
	}
	indent_level--;
	CT_DEBUG("%*d: %s\n", indent_level*2, indent_level, "}");
}

View File

@@ -0,0 +1,39 @@
/*
* Pretty printing Support for iptables xt_qtaguid module.
*
* (C) 2011 Google, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __XT_QTAGUID_PRINT_H__
#define __XT_QTAGUID_PRINT_H__

#include "xt_qtaguid_internal.h"

/*
 * pp_*(): pretty-print one struct into a freshly allocated string
 * (kasprintf, GFP_ATOMIC).  The caller owns the result and must
 * kfree() it.  A NULL argument returns a "...@null{}" string.
 */
char *pp_tag_t(tag_t *tag);
char *pp_data_counters(struct data_counters *dc, bool showValues);
char *pp_tag_node(struct tag_node *tn);
char *pp_tag_ref(struct tag_ref *tr);
char *pp_tag_stat(struct tag_stat *ts);
char *pp_iface_stat(struct iface_stat *is);
char *pp_sock_tag(struct sock_tag *st);
char *pp_uid_tag_data(struct uid_tag_data *qtd);
char *pp_proc_qtu_data(struct proc_qtu_data *pqd);

/*------------------------------------------*/
/*
 * prdebug_*(): dump a whole container (and nested containers) via
 * CT_DEBUG, so output only appears when CDEBUG_MASK is set in the
 * run-time debug_mask.  indent_level controls the printed nesting.
 */
void prdebug_sock_tag_list(int indent_level,
			   struct list_head *sock_tag_list);
void prdebug_sock_tag_tree(int indent_level,
			   struct rb_root *sock_tag_tree);
void prdebug_proc_qtu_data_tree(int indent_level,
				struct rb_root *proc_qtu_data_tree);
void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
void prdebug_uid_tag_data_tree(int indent_level,
			       struct rb_root *uid_tag_data_tree);
void prdebug_tag_stat_tree(int indent_level,
			   struct rb_root *tag_stat_tree);
void prdebug_iface_stat_list(int indent_level,
			     struct list_head *iface_stat_list);
#endif  /* ifndef __XT_QTAGUID_PRINT_H__ */