mirror of
https://github.com/armbian/linux-cix.git
synced 2026-01-06 12:30:45 -08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:
====================
Netfilter updates for net-next
The following patchset contains Netfilter updates for net-next. This
includes one patch to update ovs and act_ct to use nf_ct_put() instead
of nf_conntrack_put().
1) Add netns_tracker to nfnetlink_log and masquerade, from Eric Dumazet.
2) Remove redundant rcu read-side lock in nf_tables packet path.
3) Replace BUG() by WARN_ON_ONCE() in nft_payload.
4) Consolidate rule verdict tracing.
5) Replace WARN_ON() by WARN_ON_ONCE() in nf_tables core.
6) Make counter support built-in in nf_tables.
7) Add new field to conntrack object to identify locally generated
traffic, from Florian Westphal.
8) Prevent NAT from shadowing well-known ports, from Florian Westphal.
9) Merge nf_flow_table_{ipv4,ipv6} into nf_flow_table_inet, also from
Florian.
10) Remove redundant pointer in nft_pipapo AVX2 support, from Colin Ian King.
11) Replace opencoded max() in conntrack, from Jiapeng Chong.
12) Update conntrack to use refcount_t API, from Florian Westphal.
13) Move ip_ct_attach indirection into the nf_ct_hook structure.
14) Constify several pointer object in the netfilter codebase,
from Florian Westphal.
15) Tree-wide replacement of nf_conntrack_put() by nf_ct_put(), also
from Florian.
16) Fix egress splat due to incorrect rcu notation, from Florian.
17) Move stateful fields of connlimit, last, quota, numgen and limit
out of the expression data area.
18) Build a blob to represent the ruleset in nf_tables, this is a
requirement of the new register tracking infrastructure.
19) Add NFT_REG32_NUM to define the maximum number of 32-bit registers.
20) Add register tracking infrastructure to skip redundant
store-to-register operations, this includes support for payload,
meta and bitwise expressions.
* git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next: (32 commits)
netfilter: nft_meta: cancel register tracking after meta update
netfilter: nft_payload: cancel register tracking after payload update
netfilter: nft_bitwise: track register operations
netfilter: nft_meta: track register operations
netfilter: nft_payload: track register operations
netfilter: nf_tables: add register tracking infrastructure
netfilter: nf_tables: add NFT_REG32_NUM
netfilter: nf_tables: add rule blob layout
netfilter: nft_limit: move stateful fields out of expression data
netfilter: nft_limit: rename stateful structure
netfilter: nft_numgen: move stateful fields out of expression data
netfilter: nft_quota: move stateful fields out of expression data
netfilter: nft_last: move stateful fields out of expression data
netfilter: nft_connlimit: move stateful fields out of expression data
netfilter: egress: avoid a lockdep splat
net: prefer nf_ct_put instead of nf_conntrack_put
netfilter: conntrack: avoid useless indirection during conntrack destruction
netfilter: make function op structures const
netfilter: core: move ip_ct_attach indirection to struct nf_ct_hook
netfilter: conntrack: convert to refcount_t api
...
====================
Link: https://lore.kernel.org/r/20220109231640.104123-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
@@ -381,13 +381,13 @@ struct nf_nat_hook {
|
||||
enum ip_conntrack_dir dir);
|
||||
};
|
||||
|
||||
extern struct nf_nat_hook __rcu *nf_nat_hook;
|
||||
extern const struct nf_nat_hook __rcu *nf_nat_hook;
|
||||
|
||||
static inline void
|
||||
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_NF_NAT)
|
||||
struct nf_nat_hook *nat_hook;
|
||||
const struct nf_nat_hook *nat_hook;
|
||||
|
||||
rcu_read_lock();
|
||||
nat_hook = rcu_dereference(nf_nat_hook);
|
||||
@@ -440,7 +440,6 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
|
||||
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
|
||||
#include <linux/netfilter/nf_conntrack_zones_common.h>
|
||||
|
||||
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
|
||||
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
|
||||
struct nf_conntrack_tuple;
|
||||
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
|
||||
@@ -463,8 +462,9 @@ struct nf_ct_hook {
|
||||
void (*destroy)(struct nf_conntrack *);
|
||||
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
|
||||
const struct sk_buff *);
|
||||
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
|
||||
};
|
||||
extern struct nf_ct_hook __rcu *nf_ct_hook;
|
||||
extern const struct nf_ct_hook __rcu *nf_ct_hook;
|
||||
|
||||
struct nlattr;
|
||||
|
||||
@@ -479,7 +479,7 @@ struct nfnl_ct_hook {
|
||||
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo, s32 off);
|
||||
};
|
||||
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
|
||||
extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
|
||||
|
||||
/**
|
||||
* nf_skb_duplicated - TEE target has sent a packet
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
#ifndef _NF_CONNTRACK_COMMON_H
|
||||
#define _NF_CONNTRACK_COMMON_H
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <uapi/linux/netfilter/nf_conntrack_common.h>
|
||||
|
||||
struct ip_conntrack_stat {
|
||||
@@ -25,19 +25,21 @@ struct ip_conntrack_stat {
|
||||
#define NFCT_PTRMASK ~(NFCT_INFOMASK)
|
||||
|
||||
struct nf_conntrack {
|
||||
atomic_t use;
|
||||
refcount_t use;
|
||||
};
|
||||
|
||||
void nf_conntrack_destroy(struct nf_conntrack *nfct);
|
||||
|
||||
/* like nf_ct_put, but without module dependency on nf_conntrack */
|
||||
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
|
||||
{
|
||||
if (nfct && atomic_dec_and_test(&nfct->use))
|
||||
if (nfct && refcount_dec_and_test(&nfct->use))
|
||||
nf_conntrack_destroy(nfct);
|
||||
}
|
||||
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
|
||||
{
|
||||
if (nfct)
|
||||
atomic_inc(&nfct->use);
|
||||
refcount_inc(&nfct->use);
|
||||
}
|
||||
|
||||
#endif /* _NF_CONNTRACK_COMMON_H */
|
||||
|
||||
@@ -94,7 +94,7 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
|
||||
return skb;
|
||||
#endif
|
||||
|
||||
e = rcu_dereference(dev->nf_hooks_egress);
|
||||
e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
|
||||
if (!e)
|
||||
return skb;
|
||||
|
||||
|
||||
@@ -76,6 +76,8 @@ struct nf_conn {
|
||||
* Hint, SKB address this struct and refcnt via skb->_nfct and
|
||||
* helpers nf_conntrack_get() and nf_conntrack_put().
|
||||
* Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
|
||||
* except that the latter uses internal indirection and does not
|
||||
* result in a conntrack module dependency.
|
||||
* beware nf_ct_get() is different and don't inc refcnt.
|
||||
*/
|
||||
struct nf_conntrack ct_general;
|
||||
@@ -95,6 +97,7 @@ struct nf_conn {
|
||||
unsigned long status;
|
||||
|
||||
u16 cpu;
|
||||
u16 local_origin:1;
|
||||
possible_net_t ct_net;
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_NAT)
|
||||
@@ -169,11 +172,13 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
|
||||
return (struct nf_conn *)(nfct & NFCT_PTRMASK);
|
||||
}
|
||||
|
||||
void nf_ct_destroy(struct nf_conntrack *nfct);
|
||||
|
||||
/* decrement reference count on a conntrack */
|
||||
static inline void nf_ct_put(struct nf_conn *ct)
|
||||
{
|
||||
WARN_ON(!ct);
|
||||
nf_conntrack_put(&ct->ct_general);
|
||||
if (ct && refcount_dec_and_test(&ct->ct_general.use))
|
||||
nf_ct_destroy(&ct->ct_general);
|
||||
}
|
||||
|
||||
/* Protocol module loading */
|
||||
@@ -278,7 +283,7 @@ static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
|
||||
{
|
||||
s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
|
||||
|
||||
return timeout > 0 ? timeout : 0;
|
||||
return max(timeout, 0);
|
||||
}
|
||||
|
||||
static inline bool nf_ct_is_expired(const struct nf_conn *ct)
|
||||
|
||||
@@ -105,6 +105,8 @@ struct nft_data {
|
||||
};
|
||||
} __attribute__((aligned(__alignof__(u64))));
|
||||
|
||||
#define NFT_REG32_NUM 20
|
||||
|
||||
/**
|
||||
* struct nft_regs - nf_tables register set
|
||||
*
|
||||
@@ -115,11 +117,21 @@ struct nft_data {
|
||||
*/
|
||||
struct nft_regs {
|
||||
union {
|
||||
u32 data[20];
|
||||
u32 data[NFT_REG32_NUM];
|
||||
struct nft_verdict verdict;
|
||||
};
|
||||
};
|
||||
|
||||
struct nft_regs_track {
|
||||
struct {
|
||||
const struct nft_expr *selector;
|
||||
const struct nft_expr *bitwise;
|
||||
} regs[NFT_REG32_NUM];
|
||||
|
||||
const struct nft_expr *cur;
|
||||
const struct nft_expr *last;
|
||||
};
|
||||
|
||||
/* Store/load an u8, u16 or u64 integer to/from the u32 data register.
|
||||
*
|
||||
* Note, when using concatenations, register allocation happens at 32-bit
|
||||
@@ -346,6 +358,8 @@ int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
|
||||
void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
|
||||
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
|
||||
const struct nft_expr *expr);
|
||||
bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
|
||||
const struct nft_expr *expr);
|
||||
|
||||
struct nft_set_ext;
|
||||
|
||||
@@ -884,6 +898,8 @@ struct nft_expr_ops {
|
||||
int (*validate)(const struct nft_ctx *ctx,
|
||||
const struct nft_expr *expr,
|
||||
const struct nft_data **data);
|
||||
bool (*reduce)(struct nft_regs_track *track,
|
||||
const struct nft_expr *expr);
|
||||
bool (*gc)(struct net *net,
|
||||
const struct nft_expr *expr);
|
||||
int (*offload)(struct nft_offload_ctx *ctx,
|
||||
@@ -974,6 +990,20 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
|
||||
|
||||
#define NFT_CHAIN_POLICY_UNSET U8_MAX
|
||||
|
||||
struct nft_rule_dp {
|
||||
u64 is_last:1,
|
||||
dlen:12,
|
||||
handle:42; /* for tracing */
|
||||
unsigned char data[]
|
||||
__attribute__((aligned(__alignof__(struct nft_expr))));
|
||||
};
|
||||
|
||||
struct nft_rule_blob {
|
||||
unsigned long size;
|
||||
unsigned char data[]
|
||||
__attribute__((aligned(__alignof__(struct nft_rule_dp))));
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nft_chain - nf_tables chain
|
||||
*
|
||||
@@ -987,8 +1017,8 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
|
||||
* @name: name of the chain
|
||||
*/
|
||||
struct nft_chain {
|
||||
struct nft_rule *__rcu *rules_gen_0;
|
||||
struct nft_rule *__rcu *rules_gen_1;
|
||||
struct nft_rule_blob __rcu *blob_gen_0;
|
||||
struct nft_rule_blob __rcu *blob_gen_1;
|
||||
struct list_head rules;
|
||||
struct list_head list;
|
||||
struct rhlist_head rhlhead;
|
||||
@@ -1003,7 +1033,7 @@ struct nft_chain {
|
||||
u8 *udata;
|
||||
|
||||
/* Only used during control plane commit phase: */
|
||||
struct nft_rule **rules_next;
|
||||
struct nft_rule_blob *blob_next;
|
||||
};
|
||||
|
||||
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
|
||||
@@ -1321,7 +1351,7 @@ struct nft_traceinfo {
|
||||
const struct nft_pktinfo *pkt;
|
||||
const struct nft_base_chain *basechain;
|
||||
const struct nft_chain *chain;
|
||||
const struct nft_rule *rule;
|
||||
const struct nft_rule_dp *rule;
|
||||
const struct nft_verdict *verdict;
|
||||
enum nft_trace_types type;
|
||||
bool packet_dumped;
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
|
||||
extern struct nft_expr_type nft_imm_type;
|
||||
extern struct nft_expr_type nft_cmp_type;
|
||||
extern struct nft_expr_type nft_counter_type;
|
||||
extern struct nft_expr_type nft_lookup_type;
|
||||
extern struct nft_expr_type nft_bitwise_type;
|
||||
extern struct nft_expr_type nft_byteorder_type;
|
||||
@@ -21,6 +22,7 @@ extern struct nft_expr_type nft_last_type;
|
||||
#ifdef CONFIG_NETWORK_SECMARK
|
||||
extern struct nft_object_type nft_secmark_obj_type;
|
||||
#endif
|
||||
extern struct nft_object_type nft_counter_obj_type;
|
||||
|
||||
int nf_tables_core_module_init(void);
|
||||
void nf_tables_core_module_exit(void);
|
||||
@@ -120,6 +122,8 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
|
||||
bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
|
||||
const u32 *key, const struct nft_set_ext **ext);
|
||||
|
||||
void nft_counter_init_seqcount(void);
|
||||
|
||||
struct nft_expr;
|
||||
struct nft_regs;
|
||||
struct nft_pktinfo;
|
||||
@@ -143,4 +147,6 @@ void nft_dynset_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs, const struct nft_pktinfo *pkt);
|
||||
void nft_rt_get_eval(const struct nft_expr *expr,
|
||||
struct nft_regs *regs, const struct nft_pktinfo *pkt);
|
||||
void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
|
||||
const struct nft_pktinfo *pkt);
|
||||
#endif /* _NET_NF_TABLES_CORE_H */
|
||||
|
||||
@@ -100,6 +100,25 @@ static const struct nft_expr_ops nft_meta_bridge_get_ops = {
|
||||
.dump = nft_meta_get_dump,
|
||||
};
|
||||
|
||||
static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
|
||||
const struct nft_expr *expr)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NFT_REG32_NUM; i++) {
|
||||
if (!track->regs[i].selector)
|
||||
continue;
|
||||
|
||||
if (track->regs[i].selector->ops != &nft_meta_bridge_get_ops)
|
||||
continue;
|
||||
|
||||
track->regs[i].selector = NULL;
|
||||
track->regs[i].bitwise = NULL;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static const struct nft_expr_ops nft_meta_bridge_set_ops = {
|
||||
.type = &nft_meta_bridge_type,
|
||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
|
||||
@@ -107,6 +126,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
|
||||
.init = nft_meta_set_init,
|
||||
.destroy = nft_meta_set_destroy,
|
||||
.dump = nft_meta_set_dump,
|
||||
.reduce = nft_meta_bridge_set_reduce,
|
||||
.validate = nft_meta_set_validate,
|
||||
};
|
||||
|
||||
|
||||
@@ -59,12 +59,8 @@ config NF_TABLES_ARP
|
||||
endif # NF_TABLES
|
||||
|
||||
config NF_FLOW_TABLE_IPV4
|
||||
tristate "Netfilter flow table IPv4 module"
|
||||
depends on NF_FLOW_TABLE
|
||||
help
|
||||
This option adds the flow table IPv4 support.
|
||||
|
||||
To compile it as a module, choose M here.
|
||||
tristate
|
||||
select NF_FLOW_TABLE_INET
|
||||
|
||||
config NF_DUP_IPV4
|
||||
tristate "Netfilter IPv4 packet duplication to alternate destination"
|
||||
|
||||
@@ -24,9 +24,6 @@ obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
|
||||
obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
|
||||
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
|
||||
|
||||
# flow table support
|
||||
obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o
|
||||
|
||||
# generic IP tables
|
||||
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
|
||||
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/netfilter.h>
|
||||
#include <net/netfilter/nf_flow_table.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
|
||||
static struct nf_flowtable_type flowtable_ipv4 = {
|
||||
.family = NFPROTO_IPV4,
|
||||
.init = nf_flow_table_init,
|
||||
.setup = nf_flow_table_offload_setup,
|
||||
.action = nf_flow_rule_route_ipv4,
|
||||
.free = nf_flow_table_free,
|
||||
.hook = nf_flow_offload_ip_hook,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init nf_flow_ipv4_module_init(void)
|
||||
{
|
||||
nft_register_flowtable_type(&flowtable_ipv4);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit nf_flow_ipv4_module_exit(void)
|
||||
{
|
||||
nft_unregister_flowtable_type(&flowtable_ipv4);
|
||||
}
|
||||
|
||||
module_init(nf_flow_ipv4_module_init);
|
||||
module_exit(nf_flow_ipv4_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
|
||||
MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
|
||||
MODULE_DESCRIPTION("Netfilter flow table support");
|
||||
|
||||
@@ -48,12 +48,8 @@ endif # NF_TABLES_IPV6
|
||||
endif # NF_TABLES
|
||||
|
||||
config NF_FLOW_TABLE_IPV6
|
||||
tristate "Netfilter flow table IPv6 module"
|
||||
depends on NF_FLOW_TABLE
|
||||
help
|
||||
This option adds the flow table IPv6 support.
|
||||
|
||||
To compile it as a module, choose M here.
|
||||
tristate
|
||||
select NF_FLOW_TABLE_INET
|
||||
|
||||
config NF_DUP_IPV6
|
||||
tristate "Netfilter IPv6 packet duplication to alternate destination"
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/netfilter.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <net/netfilter/nf_flow_table.h>
|
||||
#include <net/netfilter/nf_tables.h>
|
||||
|
||||
static struct nf_flowtable_type flowtable_ipv6 = {
|
||||
.family = NFPROTO_IPV6,
|
||||
.init = nf_flow_table_init,
|
||||
.setup = nf_flow_table_offload_setup,
|
||||
.action = nf_flow_rule_route_ipv6,
|
||||
.free = nf_flow_table_free,
|
||||
.hook = nf_flow_offload_ipv6_hook,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init nf_flow_ipv6_module_init(void)
|
||||
{
|
||||
nft_register_flowtable_type(&flowtable_ipv6);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit nf_flow_ipv6_module_exit(void)
|
||||
{
|
||||
nft_unregister_flowtable_type(&flowtable_ipv6);
|
||||
}
|
||||
|
||||
module_init(nf_flow_ipv6_module_init);
|
||||
module_exit(nf_flow_ipv6_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
|
||||
MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
|
||||
MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
|
||||
|
||||
@@ -515,12 +515,6 @@ config NFT_FLOW_OFFLOAD
|
||||
This option adds the "flow_offload" expression that you can use to
|
||||
choose what flows are placed into the hardware.
|
||||
|
||||
config NFT_COUNTER
|
||||
tristate "Netfilter nf_tables counter module"
|
||||
help
|
||||
This option adds the "counter" expression that you can use to
|
||||
include packet and byte counters in a rule.
|
||||
|
||||
config NFT_CONNLIMIT
|
||||
tristate "Netfilter nf_tables connlimit module"
|
||||
depends on NF_CONNTRACK
|
||||
|
||||
@@ -75,7 +75,7 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
|
||||
nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
|
||||
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
|
||||
nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o nft_last.o \
|
||||
nft_chain_route.o nf_tables_offload.o \
|
||||
nft_counter.o nft_chain_route.o nf_tables_offload.o \
|
||||
nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
|
||||
nft_set_pipapo.o
|
||||
|
||||
@@ -100,7 +100,6 @@ obj-$(CONFIG_NFT_REJECT) += nft_reject.o
|
||||
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
|
||||
obj-$(CONFIG_NFT_REJECT_NETDEV) += nft_reject_netdev.o
|
||||
obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o
|
||||
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
|
||||
obj-$(CONFIG_NFT_LOG) += nft_log.o
|
||||
obj-$(CONFIG_NFT_MASQ) += nft_masq.o
|
||||
obj-$(CONFIG_NFT_REDIR) += nft_redir.o
|
||||
|
||||
@@ -666,32 +666,29 @@ EXPORT_SYMBOL(nf_hook_slow_list);
|
||||
/* This needs to be compiled in any case to avoid dependencies between the
|
||||
* nfnetlink_queue code and nf_conntrack.
|
||||
*/
|
||||
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
|
||||
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
|
||||
EXPORT_SYMBOL_GPL(nfnl_ct_hook);
|
||||
|
||||
struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
|
||||
const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
|
||||
EXPORT_SYMBOL_GPL(nf_ct_hook);
|
||||
|
||||
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
|
||||
/* This does not belong here, but locally generated errors need it if connection
|
||||
tracking in use: without this, connection may not be in hash table, and hence
|
||||
manufactured ICMP or RST packets will not be associated with it. */
|
||||
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
|
||||
__rcu __read_mostly;
|
||||
EXPORT_SYMBOL(ip_ct_attach);
|
||||
|
||||
struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
|
||||
const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
|
||||
EXPORT_SYMBOL_GPL(nf_nat_hook);
|
||||
|
||||
/* This does not belong here, but locally generated errors need it if connection
|
||||
* tracking in use: without this, connection may not be in hash table, and hence
|
||||
* manufactured ICMP or RST packets will not be associated with it.
|
||||
*/
|
||||
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
|
||||
{
|
||||
void (*attach)(struct sk_buff *, const struct sk_buff *);
|
||||
const struct nf_ct_hook *ct_hook;
|
||||
|
||||
if (skb->_nfct) {
|
||||
rcu_read_lock();
|
||||
attach = rcu_dereference(ip_ct_attach);
|
||||
if (attach)
|
||||
attach(new, skb);
|
||||
ct_hook = rcu_dereference(nf_ct_hook);
|
||||
if (ct_hook)
|
||||
ct_hook->attach(new, skb);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
}
|
||||
@@ -699,7 +696,7 @@ EXPORT_SYMBOL(nf_ct_attach);
|
||||
|
||||
void nf_conntrack_destroy(struct nf_conntrack *nfct)
|
||||
{
|
||||
struct nf_ct_hook *ct_hook;
|
||||
const struct nf_ct_hook *ct_hook;
|
||||
|
||||
rcu_read_lock();
|
||||
ct_hook = rcu_dereference(nf_ct_hook);
|
||||
@@ -712,7 +709,7 @@ EXPORT_SYMBOL(nf_conntrack_destroy);
|
||||
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
|
||||
const struct sk_buff *skb)
|
||||
{
|
||||
struct nf_ct_hook *ct_hook;
|
||||
const struct nf_ct_hook *ct_hook;
|
||||
bool ret = false;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
@@ -559,7 +559,7 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
|
||||
|
||||
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
|
||||
|
||||
/* Released via destroy_conntrack() */
|
||||
/* Released via nf_ct_destroy() */
|
||||
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
|
||||
const struct nf_conntrack_zone *zone,
|
||||
gfp_t flags)
|
||||
@@ -586,7 +586,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
|
||||
tmpl->status = IPS_TEMPLATE;
|
||||
write_pnet(&tmpl->ct_net, net);
|
||||
nf_ct_zone_add(tmpl, zone);
|
||||
atomic_set(&tmpl->ct_general.use, 0);
|
||||
refcount_set(&tmpl->ct_general.use, 1);
|
||||
|
||||
return tmpl;
|
||||
}
|
||||
@@ -613,13 +613,12 @@ static void destroy_gre_conntrack(struct nf_conn *ct)
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
destroy_conntrack(struct nf_conntrack *nfct)
|
||||
void nf_ct_destroy(struct nf_conntrack *nfct)
|
||||
{
|
||||
struct nf_conn *ct = (struct nf_conn *)nfct;
|
||||
|
||||
pr_debug("destroy_conntrack(%p)\n", ct);
|
||||
WARN_ON(atomic_read(&nfct->use) != 0);
|
||||
pr_debug("%s(%p)\n", __func__, ct);
|
||||
WARN_ON(refcount_read(&nfct->use) != 0);
|
||||
|
||||
if (unlikely(nf_ct_is_template(ct))) {
|
||||
nf_ct_tmpl_free(ct);
|
||||
@@ -644,9 +643,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
|
||||
if (ct->master)
|
||||
nf_ct_put(ct->master);
|
||||
|
||||
pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
|
||||
pr_debug("%s: returning ct=%p to slab\n", __func__, ct);
|
||||
nf_conntrack_free(ct);
|
||||
}
|
||||
EXPORT_SYMBOL(nf_ct_destroy);
|
||||
|
||||
static void nf_ct_delete_from_lists(struct nf_conn *ct)
|
||||
{
|
||||
@@ -743,7 +743,7 @@ nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
|
||||
/* caller must hold rcu readlock and none of the nf_conntrack_locks */
|
||||
static void nf_ct_gc_expired(struct nf_conn *ct)
|
||||
{
|
||||
if (!atomic_inc_not_zero(&ct->ct_general.use))
|
||||
if (!refcount_inc_not_zero(&ct->ct_general.use))
|
||||
return;
|
||||
|
||||
if (nf_ct_should_gc(ct))
|
||||
@@ -811,7 +811,7 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
|
||||
* in, try to obtain a reference and re-check tuple
|
||||
*/
|
||||
ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
|
||||
if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
|
||||
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
|
||||
goto found;
|
||||
|
||||
@@ -908,7 +908,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
|
||||
|
||||
smp_wmb();
|
||||
/* The caller holds a reference to this object */
|
||||
atomic_set(&ct->ct_general.use, 2);
|
||||
refcount_set(&ct->ct_general.use, 2);
|
||||
__nf_conntrack_hash_insert(ct, hash, reply_hash);
|
||||
nf_conntrack_double_unlock(hash, reply_hash);
|
||||
NF_CT_STAT_INC(net, insert);
|
||||
@@ -959,7 +959,7 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
|
||||
{
|
||||
struct nf_conn_tstamp *tstamp;
|
||||
|
||||
atomic_inc(&ct->ct_general.use);
|
||||
refcount_inc(&ct->ct_general.use);
|
||||
ct->status |= IPS_CONFIRMED;
|
||||
|
||||
/* set conntrack timestamp, if enabled. */
|
||||
@@ -990,7 +990,7 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
|
||||
|
||||
nf_ct_acct_merge(ct, ctinfo, loser_ct);
|
||||
nf_ct_add_to_dying_list(loser_ct);
|
||||
nf_conntrack_put(&loser_ct->ct_general);
|
||||
nf_ct_put(loser_ct);
|
||||
nf_ct_set(skb, ct, ctinfo);
|
||||
|
||||
NF_CT_STAT_INC(net, clash_resolve);
|
||||
@@ -1352,7 +1352,7 @@ static unsigned int early_drop_list(struct net *net,
|
||||
nf_ct_is_dying(tmp))
|
||||
continue;
|
||||
|
||||
if (!atomic_inc_not_zero(&tmp->ct_general.use))
|
||||
if (!refcount_inc_not_zero(&tmp->ct_general.use))
|
||||
continue;
|
||||
|
||||
/* kill only if still in same netns -- might have moved due to
|
||||
@@ -1470,7 +1470,7 @@ static void gc_worker(struct work_struct *work)
|
||||
continue;
|
||||
|
||||
/* need to take reference to avoid possible races */
|
||||
if (!atomic_inc_not_zero(&tmp->ct_general.use))
|
||||
if (!refcount_inc_not_zero(&tmp->ct_general.use))
|
||||
continue;
|
||||
|
||||
if (gc_worker_skip_ct(tmp)) {
|
||||
@@ -1570,7 +1570,7 @@ __nf_conntrack_alloc(struct net *net,
|
||||
/* Because we use RCU lookups, we set ct_general.use to zero before
|
||||
* this is inserted in any list.
|
||||
*/
|
||||
atomic_set(&ct->ct_general.use, 0);
|
||||
refcount_set(&ct->ct_general.use, 0);
|
||||
return ct;
|
||||
out:
|
||||
atomic_dec(&cnet->count);
|
||||
@@ -1595,7 +1595,7 @@ void nf_conntrack_free(struct nf_conn *ct)
|
||||
/* A freed object has refcnt == 0, that's
|
||||
* the golden rule for SLAB_TYPESAFE_BY_RCU
|
||||
*/
|
||||
WARN_ON(atomic_read(&ct->ct_general.use) != 0);
|
||||
WARN_ON(refcount_read(&ct->ct_general.use) != 0);
|
||||
|
||||
nf_ct_ext_destroy(ct);
|
||||
kmem_cache_free(nf_conntrack_cachep, ct);
|
||||
@@ -1687,8 +1687,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
|
||||
if (!exp)
|
||||
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
|
||||
|
||||
/* Now it is inserted into the unconfirmed list, bump refcount */
|
||||
nf_conntrack_get(&ct->ct_general);
|
||||
/* Now it is inserted into the unconfirmed list, set refcount to 1. */
|
||||
refcount_set(&ct->ct_general.use, 1);
|
||||
nf_ct_add_to_unconfirmed_list(ct);
|
||||
|
||||
local_bh_enable();
|
||||
@@ -1748,6 +1748,9 @@ resolve_normal_ct(struct nf_conn *tmpl,
|
||||
return 0;
|
||||
if (IS_ERR(h))
|
||||
return PTR_ERR(h);
|
||||
|
||||
ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
ct->local_origin = state->hook == NF_INET_LOCAL_OUT;
|
||||
}
|
||||
ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
|
||||
@@ -1919,7 +1922,7 @@ repeat:
|
||||
/* Invalid: inverse of the return code tells
|
||||
* the netfilter core what to do */
|
||||
pr_debug("nf_conntrack_in: Can't track with proto module\n");
|
||||
nf_conntrack_put(&ct->ct_general);
|
||||
nf_ct_put(ct);
|
||||
skb->_nfct = 0;
|
||||
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
|
||||
if (ret == -NF_DROP)
|
||||
@@ -2083,9 +2086,9 @@ static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
|
||||
struct nf_conn *ct,
|
||||
enum ip_conntrack_info ctinfo)
|
||||
{
|
||||
const struct nf_nat_hook *nat_hook;
|
||||
struct nf_conntrack_tuple_hash *h;
|
||||
struct nf_conntrack_tuple tuple;
|
||||
struct nf_nat_hook *nat_hook;
|
||||
unsigned int status;
|
||||
int dataoff;
|
||||
u16 l3num;
|
||||
@@ -2298,7 +2301,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
|
||||
|
||||
return NULL;
|
||||
found:
|
||||
atomic_inc(&ct->ct_general.use);
|
||||
refcount_inc(&ct->ct_general.use);
|
||||
spin_unlock(lockp);
|
||||
local_bh_enable();
|
||||
return ct;
|
||||
@@ -2453,7 +2456,6 @@ static int kill_all(struct nf_conn *i, void *data)
|
||||
void nf_conntrack_cleanup_start(void)
|
||||
{
|
||||
conntrack_gc_work.exiting = true;
|
||||
RCU_INIT_POINTER(ip_ct_attach, NULL);
|
||||
}
|
||||
|
||||
void nf_conntrack_cleanup_end(void)
|
||||
@@ -2771,16 +2773,15 @@ err_cachep:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct nf_ct_hook nf_conntrack_hook = {
|
||||
static const struct nf_ct_hook nf_conntrack_hook = {
|
||||
.update = nf_conntrack_update,
|
||||
.destroy = destroy_conntrack,
|
||||
.destroy = nf_ct_destroy,
|
||||
.get_tuple_skb = nf_conntrack_get_tuple_skb,
|
||||
.attach = nf_conntrack_attach,
|
||||
};
|
||||
|
||||
void nf_conntrack_init_end(void)
|
||||
{
|
||||
/* For use by REJECT target */
|
||||
RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
|
||||
RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
|
||||
}
|
||||
|
||||
|
||||
@@ -203,12 +203,12 @@ nf_ct_find_expectation(struct net *net,
|
||||
* about to invoke ->destroy(), or nf_ct_delete() via timeout
|
||||
* or early_drop().
|
||||
*
|
||||
* The atomic_inc_not_zero() check tells: If that fails, we
|
||||
* The refcount_inc_not_zero() check tells: If that fails, we
|
||||
* know that the ct is being destroyed. If it succeeds, we
|
||||
* can be sure the ct cannot disappear underneath.
|
||||
*/
|
||||
if (unlikely(nf_ct_is_dying(exp->master) ||
|
||||
!atomic_inc_not_zero(&exp->master->ct_general.use)))
|
||||
!refcount_inc_not_zero(&exp->master->ct_general.use)))
|
||||
return NULL;
|
||||
|
||||
if (exp->flags & NF_CT_EXPECT_PERMANENT) {
|
||||
|
||||
@@ -508,7 +508,7 @@ nla_put_failure:
|
||||
|
||||
static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
|
||||
{
|
||||
if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
|
||||
if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
|
||||
goto nla_put_failure;
|
||||
return 0;
|
||||
|
||||
@@ -1198,7 +1198,7 @@ restart:
|
||||
ct = nf_ct_tuplehash_to_ctrack(h);
|
||||
if (nf_ct_is_expired(ct)) {
|
||||
if (i < ARRAY_SIZE(nf_ct_evict) &&
|
||||
atomic_inc_not_zero(&ct->ct_general.use))
|
||||
refcount_inc_not_zero(&ct->ct_general.use))
|
||||
nf_ct_evict[i++] = ct;
|
||||
continue;
|
||||
}
|
||||
@@ -1749,7 +1749,7 @@ restart:
|
||||
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
|
||||
ct, dying, 0);
|
||||
if (res < 0) {
|
||||
if (!atomic_inc_not_zero(&ct->ct_general.use))
|
||||
if (!refcount_inc_not_zero(&ct->ct_general.use))
|
||||
continue;
|
||||
cb->args[0] = cpu;
|
||||
cb->args[1] = (unsigned long)ct;
|
||||
@@ -1820,7 +1820,7 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
|
||||
const struct nlattr *attr)
|
||||
__must_hold(RCU)
|
||||
{
|
||||
struct nf_nat_hook *nat_hook;
|
||||
const struct nf_nat_hook *nat_hook;
|
||||
int err;
|
||||
|
||||
nat_hook = rcu_dereference(nf_nat_hook);
|
||||
@@ -2922,7 +2922,7 @@ static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
|
||||
nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
|
||||
}
|
||||
|
||||
static struct nfnl_ct_hook ctnetlink_glue_hook = {
|
||||
static const struct nfnl_ct_hook ctnetlink_glue_hook = {
|
||||
.build_size = ctnetlink_glue_build_size,
|
||||
.build = ctnetlink_glue_build,
|
||||
.parse = ctnetlink_glue_parse,
|
||||
|
||||
@@ -303,7 +303,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
||||
int ret = 0;
|
||||
|
||||
WARN_ON(!ct);
|
||||
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
|
||||
if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))
|
||||
return 0;
|
||||
|
||||
if (nf_ct_should_gc(ct)) {
|
||||
@@ -370,7 +370,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
|
||||
ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
|
||||
ct_show_delta_time(s, ct);
|
||||
|
||||
seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
|
||||
seq_printf(s, "use=%u\n", refcount_read(&ct->ct_general.use));
|
||||
|
||||
if (seq_has_overflowed(s))
|
||||
goto release;
|
||||
|
||||
@@ -48,7 +48,7 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
|
||||
struct flow_offload *flow;
|
||||
|
||||
if (unlikely(nf_ct_is_dying(ct) ||
|
||||
!atomic_inc_not_zero(&ct->ct_general.use)))
|
||||
!refcount_inc_not_zero(&ct->ct_general.use)))
|
||||
return NULL;
|
||||
|
||||
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user