Merge master.kernel.org:/pub/scm/linux/kernel/git/acme/net-2.6.15
@@ -171,7 +171,6 @@ enum {
  * struct sk_buff - socket buffer
  * @next: Next buffer in list
  * @prev: Previous buffer in list
- * @list: List we are on
  * @sk: Socket we are owned by
  * @tstamp: Time we arrived
  * @dev: Device we arrived on/are leaving by
@@ -190,6 +189,7 @@ enum {
  * @cloned: Head may be cloned (check refcnt to be sure)
  * @nohdr: Payload reference only, must not modify header
  * @pkt_type: Packet class
+ * @fclone: skbuff clone status
  * @ip_summed: Driver fed us an IP checksum
  * @priority: Packet queueing priority
  * @users: User count - see {datagram,tcp}.c
@@ -202,6 +202,7 @@ enum {
  * @destructor: Destruct function
  * @nfmark: Can be used for communication between hooks
  * @nfct: Associated connection, if any
+ * @ipvs_property: skbuff is owned by ipvs
  * @nfctinfo: Relationship of this skb to the connection
  * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  * @tc_index: Traffic control index
@@ -94,7 +94,6 @@ struct dst_ops
 	struct dst_entry *	(*negative_advice)(struct dst_entry *);
 	void			(*link_failure)(struct sk_buff *);
 	void			(*update_pmtu)(struct dst_entry *dst, u32 mtu);
-	int			(*get_mss)(struct dst_entry *dst, u32 mtu);
 	int			entry_size;
 
 	atomic_t		entries;
@@ -1625,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
 
 	memset(&ndst, 0, sizeof(ndst));
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 		struct neigh_statistics *st;
 
-		if (!cpu_possible(cpu))
-			continue;
-
 		st = per_cpu_ptr(tbl->stats, cpu);
 		ndst.ndts_allocs += st->allocs;
 		ndst.ndts_destroys += st->destroys;
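Several hunks in this merge make the same conversion (see also the icmp, icmpv6, proc and sctp hunks below): an open-coded scan over all NR_CPUS slots with a cpu_possible() skip becomes a single for_each_cpu() iteration over the possible CPUs. The following is a minimal userspace model of the idiom, not kernel code; the for_each_cpu macro and the possible-CPU map here are hypothetical stand-ins for the kernel's.

#include <stdio.h>

#define NR_CPUS 8
static int cpu_possible_map[NR_CPUS] = { 1, 1, 0, 0, 1, 0, 0, 0 };

static int cpu_possible(int cpu) { return cpu_possible_map[cpu]; }

/* hypothetical stand-in for the kernel's for_each_cpu() of that era:
 * visit every possible CPU index, hiding the skip inside the macro */
#define for_each_cpu(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (!cpu_possible(cpu)) continue; else

int main(void)
{
	long stats[NR_CPUS] = { 5, 7, 0, 0, 11, 0, 0, 0 };
	long total = 0;
	int cpu;

	for_each_cpu(cpu)	/* replaces the loop + cpu_possible() check */
		total += stats[cpu];

	printf("total = %ld\n", total);	/* prints 23 */
	return 0;
}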
(+230 -278: one file's diff suppressed because it is too large.)
@@ -122,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *	__alloc_skb	-	allocate a network buffer
  *	@size: size to allocate
  *	@gfp_mask: allocation mask
+ *	@fclone: allocate from fclone cache instead of head cache
+ *		and allocate a cloned (child) skb
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (saddr->sdn_flags & ~SDF_WILD)
 		return -EINVAL;
 
-#if 1
 	if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
 	    (saddr->sdn_flags & SDF_WILD)))
 		return -EACCES;
-#else
-	/*
-	 * Maybe put the default actions in the default security ops for
-	 * dn_prot_sock ? Would be nice if the capable call would go there
-	 * too.
-	 */
-	if (security_dn_prot_sock(saddr) &&
-	    !capable(CAP_NET_BIND_SERVICE) ||
-	    saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))
-		return -EACCES;
-#endif
 
 	if (!(saddr->sdn_flags & SDF_WILD)) {
 		if (dn_ntohs(saddr->sdn_nodeaddrl)) {
@@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
 			break;
 		ret = 0;
 		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
+			u32 old_mask = ifa->ifa_mask;
 			inet_del_ifa(in_dev, ifap, 0);
 			ifa->ifa_mask = sin->sin_addr.s_addr;
 			ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
@@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
 			if ((dev->flags & IFF_BROADCAST) &&
 			    (ifa->ifa_prefixlen < 31) &&
 			    (ifa->ifa_broadcast ==
-			     (ifa->ifa_local|~ifa->ifa_mask))) {
+			     (ifa->ifa_local|~old_mask))) {
 				ifa->ifa_broadcast = (ifa->ifa_local |
 						      ~sin->sin_addr.s_addr);
 			}
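The point of saving old_mask above: the "was this broadcast address auto-derived?" test must use the mask that was in effect when the broadcast was set, not the one just written into ifa->ifa_mask. A small illustrative sketch with made-up addresses (not the kernel code path):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t local = 0xc0a80105;	/* 192.168.1.5 */
	uint32_t mask  = 0xffffff00;	/* /24 */
	uint32_t bcast = local | ~mask;	/* auto-derived: 192.168.1.255 */

	uint32_t old_mask = mask;	/* save before overwriting, as the hunk does */
	mask = 0xffff0000;		/* admin changes the mask to /16 */

	/* with old_mask the derived broadcast is still recognized ... */
	if (bcast == (local | ~old_mask))
		bcast = local | ~mask;	/* ... and re-derived: 192.168.255.255 */

	printf("new broadcast: %08x\n", (unsigned)bcast);
	return 0;
}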
@@ -2404,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 	prefix = htonl(l->key);
 
 	list_for_each_entry_rcu(fa, &li->falh, fa_list) {
-		const struct fib_info *fi = rcu_dereference(fa->fa_info);
+		const struct fib_info *fi = fa->fa_info;
 		unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
 
 		if (fa->fa_type == RTN_BROADCAST
@@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops)
 	struct inet_sock *inet;
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int err;
 
-		if (!cpu_possible(i))
-			continue;
-
 		err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
 				       &per_cpu(__icmp_socket, i));
@@ -1023,10 +1023,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 			int alloclen;
 
 			skb_prev = skb;
-			if (skb_prev)
-				fraggap = skb_prev->len - maxfraglen;
-			else
-				fraggap = 0;
+			fraggap = skb_prev->len - maxfraglen;
 
 			alloclen = fragheaderlen + hh_len + fraggap + 15;
 			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
@@ -50,7 +50,7 @@
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
 #include <linux/netfilter_ipv4/listhelp.h>
 
-#define IP_CONNTRACK_VERSION	"2.3"
+#define IP_CONNTRACK_VERSION	"2.4"
 
 #if 0
 #define DEBUGP printk
@@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 static int ip_conntrack_hash_rnd_initted;
 static unsigned int ip_conntrack_hash_rnd;
 
-static u_int32_t
-hash_conntrack(const struct ip_conntrack_tuple *tuple)
+static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
+			    unsigned int size, unsigned int rnd)
 {
 #if 0
 	dump_tuple(tuple);
 #endif
 	return (jhash_3words(tuple->src.ip,
 	                     (tuple->dst.ip ^ tuple->dst.protonum),
 	                     (tuple->src.u.all | (tuple->dst.u.all << 16)),
-	                     ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
+	                     rnd) % size);
 }
 
+static u_int32_t
+hash_conntrack(const struct ip_conntrack_tuple *tuple)
+{
+	return __hash_conntrack(tuple, ip_conntrack_htable_size,
+				ip_conntrack_hash_rnd);
+}
+
 int
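The refactor above splits the hash into a core that takes table size and seed as explicit parameters, plus a thin wrapper bound to the current globals. With the core exposed, a resize path can hash entries into a new table (new size, new seed) while the old one is still live. A userspace sketch of the shape, where mix32() is a hypothetical stand-in for jhash_3words():

#include <stdint.h>
#include <stdio.h>

static unsigned int table_size = 256;		/* models ip_conntrack_htable_size */
static uint32_t table_rnd = 0x9e3779b9u;	/* models ip_conntrack_hash_rnd */

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed;

	h = (h ^ a) * 0x85ebca6bu;	/* toy mixer, NOT jhash */
	h = (h ^ b) * 0xc2b2ae35u;
	h = (h ^ c) * 0x27d4eb2fu;
	return h ^ (h >> 16);
}

static uint32_t __hash_tuple(uint32_t src, uint32_t dst, uint32_t ports,
			     unsigned int size, uint32_t rnd)
{
	return mix32(src, dst, ports, rnd) % size;	/* size/seed are parameters */
}

static uint32_t hash_tuple(uint32_t src, uint32_t dst, uint32_t ports)
{
	/* wrapper bound to the current table, like hash_conntrack() */
	return __hash_tuple(src, dst, ports, table_size, table_rnd);
}

int main(void)
{
	printf("bucket now: %u\n", (unsigned)hash_tuple(0x0a000001u, 0x0a000002u, 80));
	/* a resize path calls __hash_tuple(..., new_size, new_rnd) directly */
	return 0;
}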
@@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data)
 	return 1;
 }
 
-static void free_conntrack_hash(void)
+static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
 {
-	if (ip_conntrack_vmalloc)
-		vfree(ip_conntrack_hash);
+	if (vmalloced)
+		vfree(hash);
 	else
-		free_pages((unsigned long)ip_conntrack_hash,
-			   get_order(sizeof(struct list_head)
-				     * ip_conntrack_htable_size));
+		free_pages((unsigned long)hash,
+			   get_order(sizeof(struct list_head) * size));
 }
 
 void ip_conntrack_flush()
@@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void)
 	ip_conntrack_flush();
 	kmem_cache_destroy(ip_conntrack_cachep);
 	kmem_cache_destroy(ip_conntrack_expect_cachep);
-	free_conntrack_hash();
+	free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+			    ip_conntrack_htable_size);
 	nf_unregister_sockopt(&so_getorigdst);
 }
 
-static int hashsize;
-module_param(hashsize, int, 0400);
+static struct list_head *alloc_hashtable(int size, int *vmalloced)
+{
+	struct list_head *hash;
+	unsigned int i;
+
+	*vmalloced = 0;
+	hash = (void*)__get_free_pages(GFP_KERNEL,
+				       get_order(sizeof(struct list_head)
+						 * size));
+	if (!hash) {
+		*vmalloced = 1;
+		printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
+		hash = vmalloc(sizeof(struct list_head) * size);
+	}
+
+	if (hash)
+		for (i = 0; i < size; i++)
+			INIT_LIST_HEAD(&hash[i]);
+
+	return hash;
+}
+
+int set_hashsize(const char *val, struct kernel_param *kp)
+{
+	int i, bucket, hashsize, vmalloced;
+	int old_vmalloced, old_size;
+	int rnd;
+	struct list_head *hash, *old_hash;
+	struct ip_conntrack_tuple_hash *h;
+
+	/* On boot, we can set this without any fancy locking. */
+	if (!ip_conntrack_htable_size)
+		return param_set_int(val, kp);
+
+	hashsize = simple_strtol(val, NULL, 0);
+	if (!hashsize)
+		return -EINVAL;
+
+	hash = alloc_hashtable(hashsize, &vmalloced);
+	if (!hash)
+		return -ENOMEM;
+
+	/* We have to rehash for the new table anyway, so we also can
+	 * use a new random seed */
+	get_random_bytes(&rnd, 4);
+
+	write_lock_bh(&ip_conntrack_lock);
+	for (i = 0; i < ip_conntrack_htable_size; i++) {
+		while (!list_empty(&ip_conntrack_hash[i])) {
+			h = list_entry(ip_conntrack_hash[i].next,
+				       struct ip_conntrack_tuple_hash, list);
+			list_del(&h->list);
+			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+			list_add_tail(&h->list, &hash[bucket]);
+		}
+	}
+	old_size = ip_conntrack_htable_size;
+	old_vmalloced = ip_conntrack_vmalloc;
+	old_hash = ip_conntrack_hash;
+
+	ip_conntrack_htable_size = hashsize;
+	ip_conntrack_vmalloc = vmalloced;
+	ip_conntrack_hash = hash;
+	ip_conntrack_hash_rnd = rnd;
+	write_unlock_bh(&ip_conntrack_lock);
+
+	free_conntrack_hash(old_hash, old_vmalloced, old_size);
+	return 0;
+}
+
+module_param_call(hashsize, set_hashsize, param_get_uint,
+		  &ip_conntrack_htable_size, 0600);
 
 int __init ip_conntrack_init(void)
 {
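The set_hashsize() ordering above is worth modeling: build the new bucket array, splice every entry into its new bucket computed with the new size and seed, publish the new globals while the writer lock is held, and free the old array only after nothing points at it. A userspace sketch with a toy hash, a toy entry type, and no locking; only the ordering mirrors the kernel code:

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned int key; struct entry *next; };

static struct entry **table;
static unsigned int table_size, table_seed;

static unsigned int hash_key(unsigned int key, unsigned int size,
			     unsigned int seed)
{
	return (key ^ seed) % size;	/* toy stand-in for __hash_conntrack() */
}

static int resize(unsigned int new_size, unsigned int new_seed)
{
	struct entry **new_tab = calloc(new_size, sizeof(*new_tab));
	struct entry **old_tab = table;
	unsigned int old_size = table_size;

	if (!new_tab)
		return -1;

	/* a real implementation takes the table lock here */
	for (unsigned int i = 0; i < old_size; i++) {
		while (old_tab[i]) {			/* unlink from old bucket */
			struct entry *e = old_tab[i];
			unsigned int b = hash_key(e->key, new_size, new_seed);

			old_tab[i] = e->next;
			e->next = new_tab[b];		/* relink into new bucket */
			new_tab[b] = e;
		}
	}
	table = new_tab;	/* publish the new size, seed and table ... */
	table_size = new_size;
	table_seed = new_seed;
	/* ... then drop the lock; the old array is empty and safe to free */
	free(old_tab);
	return 0;
}

int main(void)
{
	struct entry a = { 1, NULL }, b = { 2, NULL };

	table_size = 2;
	table_seed = 0;
	table = calloc(table_size, sizeof(*table));
	table[hash_key(a.key, table_size, table_seed)] = &a;
	table[hash_key(b.key, table_size, table_seed)] = &b;

	resize(8, 0x5bd1e995u);
	printf("resized to %u buckets\n", table_size);
	free(table);
	return 0;
}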
@@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void)
 
 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
 	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
-	if (hashsize) {
-		ip_conntrack_htable_size = hashsize;
-	} else {
+	if (!ip_conntrack_htable_size) {
 		ip_conntrack_htable_size
 			= (((num_physpages << PAGE_SHIFT) / 16384)
 			   / sizeof(struct list_head));
@@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void)
 		return ret;
 	}
 
-	/* AK: the hash table is twice as big than needed because it
-	   uses list_head.  it would be much nicer to caches to use a
-	   single pointer list head here. */
-	ip_conntrack_vmalloc = 0;
-	ip_conntrack_hash
-		=(void*)__get_free_pages(GFP_KERNEL,
-					 get_order(sizeof(struct list_head)
-						   *ip_conntrack_htable_size));
-	if (!ip_conntrack_hash) {
-		ip_conntrack_vmalloc = 1;
-		printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
-		ip_conntrack_hash = vmalloc(sizeof(struct list_head)
-					    * ip_conntrack_htable_size);
-	}
+	ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
+					    &ip_conntrack_vmalloc);
 	if (!ip_conntrack_hash) {
 		printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
 		goto err_unreg_sockopt;
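What alloc_hashtable() factors out is a try-fast-then-fall-back allocation that records which allocator succeeded, so the matching free can be chosen later (free_pages() vs vfree() in the kernel). A hedged userspace sketch; alloc_fast()/alloc_slow() are hypothetical stand-ins for __get_free_pages()/vmalloc(), and here both happen to be freed with plain free():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bucket { struct bucket *next; };

/* pretend the "fast" allocator can fail on large requests */
static void *alloc_fast(size_t sz) { return sz > 4096 ? NULL : malloc(sz); }
static void *alloc_slow(size_t sz) { return malloc(sz); }

static struct bucket **alloc_hashtable(size_t n, int *slow)
{
	struct bucket **hash;

	*slow = 0;
	hash = alloc_fast(n * sizeof(*hash));
	if (!hash) {
		*slow = 1;	/* record which allocator succeeded */
		fprintf(stderr, "falling back to slow allocator\n");
		hash = alloc_slow(n * sizeof(*hash));
	}
	if (hash)
		memset(hash, 0, n * sizeof(*hash));	/* all chains empty */
	return hash;
}

int main(void)
{
	int slow;
	struct bucket **h = alloc_hashtable(1024, &slow);

	printf("allocated=%p slow=%d\n", (void *)h, slow);
	free(h);	/* the kernel's free path branches on the flag instead */
	return 0;
}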
@@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void)
 	ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
 	write_unlock_bh(&ip_conntrack_lock);
 
-	for (i = 0; i < ip_conntrack_htable_size; i++)
-		INIT_LIST_HEAD(&ip_conntrack_hash[i]);
-
 	/* For use by ipt_REJECT */
 	ip_ct_attach = ip_conntrack_attach;
@@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void)
 err_free_conntrack_slab:
 	kmem_cache_destroy(ip_conntrack_cachep);
 err_free_hash:
-	free_conntrack_hash();
+	free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+			    ip_conntrack_htable_size);
 err_unreg_sockopt:
 	nf_unregister_sockopt(&so_getorigdst);
@@ -90,9 +90,7 @@ fold_field(void *mib[], int offt)
 	unsigned long res = 0;
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_cpu(i) {
 		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
 	}
@@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
 	struct sock *sk;
 	int err, i, j;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
+	for_each_cpu(i) {
 		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
 				       &per_cpu(__icmpv6_socket, i));
 		if (err < 0) {
@@ -749,9 +746,7 @@ void icmpv6_cleanup(void)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_cpu(i) {
 		sock_release(per_cpu(__icmpv6_socket, i));
 	}
 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
@@ -140,9 +140,7 @@ fold_field(void *mib[], int offt)
 	unsigned long res = 0;
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_cpu(i) {
 		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
 		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
 	}
@@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
 
 int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
 {
-	struct netlink_sock *nlk;
 	int len = skb->len;
 
-	nlk = nlk_sk(sk);
-
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk, len);
 	sock_put(sk);
@@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
 	}
 	if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
 		return -EINVAL;
-	if (rose_route.ndigis > 8) /* No more than 8 digipeats */
+	if (rose_route.ndigis > AX25_MAX_DIGIS)
 		return -EINVAL;
 	err = rose_add_node(&rose_route, dev);
 	dev_put(dev);
@@ -69,9 +69,7 @@ fold_field(void *mib[], int nr)
 	unsigned long res = 0;
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_cpu(i) {
 		res +=
 		    *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
 					 sizeof (unsigned long) * nr));
@@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 
 EXPORT_SYMBOL(xfrm_bundle_ok);
 
-/* Well... that's _TASK_. We need to scan through transformation
- * list and figure out what mss tcp should generate in order to
- * final datagram fit to mtu. Mama mia... :-)
- *
- * Apparently, some easy way exists, but we used to choose the most
- * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
- *
- * Consider this function as something like dark humour. :-)
- */
-static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
-{
-	int res = mtu - dst->header_len;
-
-	for (;;) {
-		struct dst_entry *d = dst;
-		int m = res;
-
-		do {
-			struct xfrm_state *x = d->xfrm;
-			if (x) {
-				spin_lock_bh(&x->lock);
-				if (x->km.state == XFRM_STATE_VALID &&
-				    x->type && x->type->get_max_size)
-					m = x->type->get_max_size(d->xfrm, m);
-				else
-					m += x->props.header_len;
-				spin_unlock_bh(&x->lock);
-			}
-		} while ((d = d->child) != NULL);
-
-		if (m <= mtu)
-			break;
-		res -= (m - mtu);
-		if (res < 88)
-			return mtu;
-	}
-
-	return res + dst->header_len;
-}
-
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
 	int err = 0;
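The deleted xfrm_get_mss() above solved a small fixed-point problem: find the largest payload whose size after all transforms still fits the path MTU, shrinking by the overshoot until it does. A userspace sketch of just that loop, ignoring the dst header_len adjustment and using a hypothetical fixed per-transform overhead in place of the get_max_size() callback chain:

#include <stdio.h>

/* size of a payload after each transform adds its header; stand-in for
 * walking the dst->xfrm state chain */
static int grown_size(int payload, const int *hdr, int n)
{
	for (int i = 0; i < n; i++)
		payload += hdr[i];
	return payload;
}

static int fit_payload(int mtu, const int *hdr, int n)
{
	int res = mtu;

	for (;;) {
		int m = grown_size(res, hdr, n);

		if (m <= mtu)
			break;
		res -= m - mtu;		/* shrink by the overshoot, retry */
		if (res < 88)		/* same floor the kernel loop used */
			return mtu;
	}
	return res;
}

int main(void)
{
	int hdr[2] = { 8, 20 };		/* made-up per-transform overheads */

	printf("payload for mtu 1500: %d\n", fit_payload(1500, hdr, 2));
	return 0;
}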
@@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 		dst_ops->negative_advice = xfrm_negative_advice;
 	if (likely(dst_ops->link_failure == NULL))
 		dst_ops->link_failure = xfrm_link_failure;
-	if (likely(dst_ops->get_mss == NULL))
-		dst_ops->get_mss = xfrm_get_mss;
 	if (likely(afinfo->garbage_collect == NULL))
 		afinfo->garbage_collect = __xfrm_garbage_collect;
 	xfrm_policy_afinfo[afinfo->family] = afinfo;
@@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
 		dst_ops->check = NULL;
 		dst_ops->negative_advice = NULL;
 		dst_ops->link_failure = NULL;
-		dst_ops->get_mss = NULL;
 		afinfo->garbage_collect = NULL;
 	}
 }
@@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
 
+/*
+ * This function is NOT optimal.  For example, with ESP it will give an
+ * MTU that's usually two bytes short of being optimal.  However, it will
+ * usually give an answer that's a multiple of 4 provided the input is
+ * also a multiple of 4.
+ */
+int xfrm_state_mtu(struct xfrm_state *x, int mtu)
+{
+	int res = mtu;