Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (82 commits)
  ipw2200: Call netif_*_queue() interfaces properly.
  netxen: Needs to include linux/vmalloc.h
  [netdrvr] atl1d: fix !CONFIG_PM build
  r6040: rework init_one error handling
  r6040: bump release number to 0.18
  r6040: handle RX fifo full and no descriptor interrupts
  r6040: change the default waiting time
  r6040: use definitions for magic values in descriptor status
  r6040: completely rework the RX path
  r6040: call napi_disable when puting down the interface and set lp->dev accordingly.
  mv643xx_eth: fix NETPOLL build
  r6040: rework the RX buffers allocation routine
  r6040: fix scheduling while atomic in r6040_tx_timeout
  r6040: fix null pointer access and tx timeouts
  r6040: prefix all functions with r6040
  rndis_host: support WM6 devices as modems
  at91_ether: use netstats in net_device structure
  sfc: Create one RX queue and interrupt per CPU package by default
  sfc: Use a separate workqueue for resets
  sfc: I2C adapter initialisation fixes
  ...
@@ -569,6 +569,7 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  * separate class since they always nest.
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
 
 static void vlan_dev_set_lockdep_one(struct net_device *dev,
 				     struct netdev_queue *txq,
@@ -581,6 +582,9 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
 
 static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 {
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &vlan_netdev_addr_lock_key,
+				       subclass);
 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+68 -34
@@ -261,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 
 DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_LOCKDEP
 /*
  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
@@ -301,6 +301,7 @@ static const char *netdev_lock_name[] =
 	 "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 
 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 {
@@ -313,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 	return ARRAY_SIZE(netdev_lock_type) - 1;
 }
 
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-					    unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+						 unsigned short dev_type)
 {
 	int i;
 
@@ -322,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock,
 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 				   netdev_lock_name[i]);
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+	int i;
+
+	i = netdev_lock_pos(dev->type);
+	lockdep_set_class_and_name(&dev->addr_list_lock,
+				   &netdev_addr_lock_key[i],
+				   netdev_lock_name[i]);
+}
 #else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-					    unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+						 unsigned short dev_type)
 {
 }
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+}
 #endif
@@ -1645,32 +1659,6 @@ out_kfree_skb:
 	return 0;
 }
 
-/**
- *	dev_queue_xmit - transmit a buffer
- *	@skb: buffer to transmit
- *
- *	Queue a buffer for transmission to a network device. The caller must
- *	have set the device and priority and built the buffer before calling
- *	this function. The function can be called from an interrupt.
- *
- *	A negative errno code is returned on a failure. A success does not
- *	guarantee the frame will be transmitted as it may be dropped due
- *	to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- *      I notice this method can also return errors from the queue disciplines,
- *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
- *      be positive.
- *
- *      Regardless of the return value, the skb is consumed, so it is currently
- *      difficult to retry a send to this method.  (You can bump the ref count
- *      before sending to hold a reference for retry if you are careful.)
- *
- *      When calling this method, interrupts MUST be enabled.  This is because
- *      the BH enable code must have IRQs enabled so that it will not deadlock.
- *          --BLG
- */
-
 static u32 simple_tx_hashrnd;
 static int simple_tx_hashrnd_initialized = 0;
 
@@ -1738,6 +1726,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	return netdev_get_tx_queue(dev, queue_index);
 }
 
+/**
+ *	dev_queue_xmit - transmit a buffer
+ *	@skb: buffer to transmit
+ *
+ *	Queue a buffer for transmission to a network device. The caller must
+ *	have set the device and priority and built the buffer before calling
+ *	this function. The function can be called from an interrupt.
+ *
+ *	A negative errno code is returned on a failure. A success does not
+ *	guarantee the frame will be transmitted as it may be dropped due
+ *	to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ *      I notice this method can also return errors from the queue disciplines,
+ *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
+ *      be positive.
+ *
+ *      Regardless of the return value, the skb is consumed, so it is currently
+ *      difficult to retry a send to this method.  (You can bump the ref count
+ *      before sending to hold a reference for retry if you are careful.)
+ *
+ *      When calling this method, interrupts MUST be enabled.  This is because
+ *      the BH enable code must have IRQs enabled so that it will not deadlock.
+ *          --BLG
+ */
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
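The moved comment is the whole calling contract: negative values are errnos, positive values are NET_XMIT_* codes from the qdisc, and the skb is consumed either way. A minimal caller sketch (not part of this patch; the wrapper name and the error mapping are illustrative):

/* Sketch only: dev_queue_xmit() consumes the skb whatever it returns. */
static int example_xmit(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	if (rc < 0)			/* negative errno: hard failure */
		return rc;
	if (rc != NET_XMIT_SUCCESS)	/* positive NET_XMIT_* code, e.g. NET_XMIT_DROP */
		return -ENOBUFS;	/* illustrative mapping; do not touch the skb here */
	return 0;
}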
@@ -3852,7 +3865,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 					   void *_unused)
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
-	netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
 	dev_queue->xmit_lock_owner = -1;
 }
 
@@ -3897,6 +3910,7 @@ int register_netdevice(struct net_device *dev)
 	net = dev_net(dev);
 
 	spin_lock_init(&dev->addr_list_lock);
+	netdev_set_addr_lockdep_class(dev);
 	netdev_init_queue_locks(dev);
 
 	dev->iflink = -1;
@@ -4207,7 +4221,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 {
 	struct netdev_queue *tx;
 	struct net_device *dev;
-	int alloc_size;
+	size_t alloc_size;
 	void *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -4227,7 +4241,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		return NULL;
 	}
 
-	tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
+	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
 	if (!tx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
 		       "tx qdiscs.\n");
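kcalloc(n, size, flags) zeroes the allocation like kzalloc(), but it also returns NULL when n * size would overflow, rather than letting a hand-written multiplication wrap to a too-small request. A userspace analogue of the same distinction (calloc() versus a caller-side multiply), just to show the wrap:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 2 + 2;	/* n * 2 wraps around size_t */
	size_t wrapped = n * 2;		/* a tiny number, not an error */

	printf("n * 2 wraps to %zu bytes\n", wrapped);
	printf("calloc(n, 2) -> %p (NULL: overflow detected)\n", calloc(n, 2));
	return 0;
}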
@@ -4686,6 +4700,26 @@ err_name:
 	return -ENOMEM;
 }
 
+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+{
+	struct device_driver *driver;
+	struct device *parent;
+
+	if (len <= 0 || !buffer)
+		return buffer;
+	buffer[0] = 0;
+
+	parent = dev->dev.parent;
+
+	if (!parent)
+		return buffer;
+
+	driver = parent->driver;
+	if (driver && driver->name)
+		strlcpy(buffer, driver->name, len);
+	return buffer;
+}
+
 static void __net_exit netdev_exit(struct net *net)
 {
 	kfree(net->dev_name_head);
@@ -472,7 +472,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	}
 	if (likely(sysctl_tcp_sack)) {
 		opts->options |= OPTION_SACK_ADVERTISE;
-		if (unlikely(!OPTION_TS & opts->options))
+		if (unlikely(!(OPTION_TS & opts->options)))
 			size += TCPOLEN_SACKPERM_ALIGNED;
 	}
 
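The added parentheses matter because ! binds tighter than &: !OPTION_TS is 0 for any nonzero flag, so the old test could never be true and the SACK-permitted option was never sized when timestamps were off. A standalone demonstration (the flag values are illustrative, not copied from tcp_output.c):

#include <stdio.h>

#define OPTION_TS	(1 << 1)	/* illustrative flag values */
#define OPTION_SACK	(1 << 2)

int main(void)
{
	int options = OPTION_SACK;	/* timestamps NOT negotiated */

	printf("buggy: %d\n", !OPTION_TS & options);	/* (!OPTION_TS) & options == 0, always */
	printf("fixed: %d\n", !(OPTION_TS & options));	/* 1: OPTION_TS really is absent */
	return 0;
}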
@@ -1325,6 +1325,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
 			val = 8;
+		else if (val > USHORT_MAX)
+			val = USHORT_MAX;
 		up->pcslen = val;
 		up->pcflag |= UDPLITE_SEND_CC;
 		break;
@@ -1337,6 +1339,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Avoid silly minimal values. */
 			val = 8;
+		else if (val > USHORT_MAX)
+			val = USHORT_MAX;
 		up->pcrlen = val;
 		up->pcflag |= UDPLITE_RECV_CC;
 		break;
 
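These are the UDP-Lite checksum-coverage options: requests below 8 are rounded up to 8, and with the two added lines anything above USHORT_MAX is now clamped to USHORT_MAX. A hypothetical userspace check of the new clamp (the protocol and option constants are the standard UDP-Lite ones, defined here in case the libc headers lack them):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10	/* sender checksum coverage */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int val = 100000, out = 0;	/* deliberately above USHORT_MAX */
	socklen_t len = sizeof(out);

	if (fd < 0)
		return 1;
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &val, sizeof(val));
	getsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &out, &len);
	printf("requested %d, kernel stored %d\n", val, out);	/* expect 65535 with this patch */
	return 0;
}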
+1 -1
@@ -153,7 +153,7 @@ static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 
 static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
 
-struct ipv6_devconf ipv6_devconf __read_mostly = {
+static struct ipv6_devconf ipv6_devconf __read_mostly = {
 	.forwarding		= 0,
 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
 	.mtu6			= IPV6_MIN_MTU,
+22 -38
@@ -661,17 +661,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 
 static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
 {
-	if (net->ipv6.ip6_fib_timer->expires == 0 &&
+	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
 	    (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
-		mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-			  net->ipv6.sysctl.ip6_rt_gc_interval);
+		mod_timer(&net->ipv6.ip6_fib_timer,
+			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
 void fib6_force_start_gc(struct net *net)
 {
-	if (net->ipv6.ip6_fib_timer->expires == 0)
-		mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-			  net->ipv6.sysctl.ip6_rt_gc_interval);
+	if (!timer_pending(&net->ipv6.ip6_fib_timer))
+		mod_timer(&net->ipv6.ip6_fib_timer,
+			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
 /*
@@ -1447,27 +1447,23 @@ void fib6_run_gc(unsigned long expires, struct net *net)
 		gc_args.timeout = expires ? (int)expires :
 			net->ipv6.sysctl.ip6_rt_gc_interval;
 	} else {
-		local_bh_disable();
-		if (!spin_trylock(&fib6_gc_lock)) {
-			mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ);
-			local_bh_enable();
+		if (!spin_trylock_bh(&fib6_gc_lock)) {
+			mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
 			return;
 		}
 		gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
 	}
-	gc_args.more = 0;
 
-	icmp6_dst_gc(&gc_args.more);
+	gc_args.more = icmp6_dst_gc();
 
 	fib6_clean_all(net, fib6_age, 0, NULL);
 
 	if (gc_args.more)
-		mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-			  net->ipv6.sysctl.ip6_rt_gc_interval);
-	else {
-		del_timer(net->ipv6.ip6_fib_timer);
-		net->ipv6.ip6_fib_timer->expires = 0;
-	}
+		mod_timer(&net->ipv6.ip6_fib_timer,
+			  round_jiffies(jiffies
+					+ net->ipv6.sysctl.ip6_rt_gc_interval));
+	else
+		del_timer(&net->ipv6.ip6_fib_timer);
 	spin_unlock_bh(&fib6_gc_lock);
 }
 
@@ -1478,24 +1474,15 @@ static void fib6_gc_timer_cb(unsigned long arg)
 
 static int fib6_net_init(struct net *net)
 {
-	int ret;
-	struct timer_list *timer;
-
-	ret = -ENOMEM;
-	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
-	if (!timer)
-		goto out;
-
-	setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net);
-	net->ipv6.ip6_fib_timer = timer;
+	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
 
 	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
 	if (!net->ipv6.rt6_stats)
 		goto out_timer;
 
-	net->ipv6.fib_table_hash =
-		kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ,
-			GFP_KERNEL);
+	net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ,
+					   sizeof(*net->ipv6.fib_table_hash),
+					   GFP_KERNEL);
 	if (!net->ipv6.fib_table_hash)
 		goto out_rt6_stats;
 
@@ -1521,9 +1508,7 @@ static int fib6_net_init(struct net *net)
 #endif
 	fib6_tables_init(net);
 
-	ret = 0;
-out:
-	return ret;
+	return 0;
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 out_fib6_main_tbl:
@@ -1534,15 +1519,14 @@ out_fib_table_hash:
 out_rt6_stats:
 	kfree(net->ipv6.rt6_stats);
 out_timer:
-	kfree(timer);
-	goto out;
+	return -ENOMEM;
 }
 
 static void fib6_net_exit(struct net *net)
 {
 	rt6_ifdown(net, NULL);
-	del_timer_sync(net->ipv6.ip6_fib_timer);
-	kfree(net->ipv6.ip6_fib_timer);
+	del_timer_sync(&net->ipv6.ip6_fib_timer);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	kfree(net->ipv6.fib6_local_tbl);
 #endif
 
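The hunks above replace a kzalloc'd timer pointer with a struct timer_list embedded in the per-namespace state, driven through timer_pending()/mod_timer()/del_timer_sync(). A condensed sketch of that pattern with the same 2008-era timer API (the struct and function names here are illustrative, not from the patch):

/* Sketch: embedding the timer removes the allocation, the NULL tracking
 * and the separate kfree() on teardown. */
struct example_gc_state {
	struct timer_list	timer;
	unsigned long		interval;	/* in jiffies */
};

static void example_gc_init(struct example_gc_state *st, void (*cb)(unsigned long))
{
	setup_timer(&st->timer, cb, (unsigned long)st);
}

static void example_gc_start(struct example_gc_state *st)
{
	if (!timer_pending(&st->timer))		/* replaces the old ->expires == 0 test */
		mod_timer(&st->timer,
			  round_jiffies(jiffies + st->interval));
}

static void example_gc_exit(struct example_gc_state *st)
{
	del_timer_sync(&st->timer);		/* no kfree(): the timer is embedded */
}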
+4 -6
@@ -978,13 +978,12 @@ out:
 	return &rt->u.dst;
 }
 
-int icmp6_dst_gc(int *more)
+int icmp6_dst_gc(void)
 {
 	struct dst_entry *dst, *next, **pprev;
-	int freed;
+	int more = 0;
 
 	next = NULL;
-	freed = 0;
 
 	spin_lock_bh(&icmp6_dst_lock);
 	pprev = &icmp6_dst_gc_list;
@@ -993,16 +992,15 @@ int icmp6_dst_gc(int *more)
 		if (!atomic_read(&dst->__refcnt)) {
 			*pprev = dst->next;
 			dst_free(dst);
-			freed++;
 		} else {
 			pprev = &dst->next;
-			(*more)++;
+			++more;
 		}
 	}
 
 	spin_unlock_bh(&icmp6_dst_lock);
 
-	return freed;
+	return more;
 }
 
 static int ip6_dst_gc(struct dst_ops *ops)
@@ -73,6 +73,7 @@ static const struct proto_ops nr_proto_ops;
  * separate class since they always nest.
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
+static struct lock_class_key nr_netdev_addr_lock_key;
 
 static void nr_set_lockdep_one(struct net_device *dev,
 			       struct netdev_queue *txq,
@@ -83,6 +84,7 @@ static void nr_set_lockdep_one(struct net_device *dev,
 
 static void nr_set_lockdep_key(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
 }
 
@@ -74,6 +74,7 @@ ax25_address rose_callsign;
  * separate class since they always nest.
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
+static struct lock_class_key rose_netdev_addr_lock_key;
 
 static void rose_set_lockdep_one(struct net_device *dev,
 				 struct netdev_queue *txq,
@@ -84,6 +85,7 @@ static void rose_set_lockdep_one(struct net_device *dev,
 
 static void rose_set_lockdep_key(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
 }
 
+1 -1
@@ -447,7 +447,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
-struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
+static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 {
 	unsigned int size = n * sizeof(struct hlist_head), i;
 	struct hlist_head *h;
 
@@ -212,9 +212,9 @@ static void dev_watchdog(unsigned long arg)
 		if (some_queue_stopped &&
 		    time_after(jiffies, (dev->trans_start +
 					 dev->watchdog_timeo))) {
-			printk(KERN_INFO "NETDEV WATCHDOG: %s: "
-			       "transmit timed out\n",
-			       dev->name);
+			char drivername[64];
+			printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+			       dev->name, netdev_drivername(dev, drivername, 64));
 			dev->tx_timeout(dev);
 			WARN_ON_ONCE(1);
 		}
 
+3 -1
@@ -71,6 +71,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
 
 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
+
 /* Add data to the front of the queue. */
 static inline void sctp_outq_head_data(struct sctp_outq *q,
 				       struct sctp_chunk *ch)
@@ -712,7 +714,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
  * locking concerns must be made.  Today we use the sock lock to protect
  * this function.
  */
-int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 {
 	struct sctp_packet *packet;
 	struct sctp_packet singleton;
 
@@ -519,8 +519,3 @@ int __init sctp_remaddr_proc_init(void)
 
 	return 0;
 }
-
-void sctp_assoc_proc_exit(void)
-{
-	remove_proc_entry("remaddr", proc_net_sctp);
-}