/*
* NET3 Protocol independent device support routines.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Derived from the non IP parts of dev.c 1.0.19
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
*
* Additional Authors:
* Florian la Roche <rzsfl@rz.uni-sb.de>
* Alan Cox <gw4pts@gw4pts.ampr.org>
* David Hinds <dahinds@users.sourceforge.net>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
* Adam Sulmicki <adam@cfar.umd.edu>
* Pekka Riikonen <priikone@poesidon.pspt.fi>
*
* Changes:
* D.J. Barrow : Fixed bug where dev->refcnt gets set
* to 2 if register_netdev gets called
* before net_dev_init & also removed a
* few lines of code in the process.
* Alan Cox : device private ioctl copies fields back.
* Alan Cox : Transmit queue code does relevant
* stunts to keep the queue safe.
* Alan Cox : Fixed double lock.
* Alan Cox : Fixed promisc NULL pointer trap
* ???????? : Support the full private ioctl range
* Alan Cox : Moved ioctl permission check into
* drivers
* Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
* Alan Cox : 100 backlog just doesn't cut it when
* you start doing multicast video 8)
* Alan Cox : Rewrote net_bh and list manager.
* Alan Cox : Fix ETH_P_ALL echoback lengths.
* Alan Cox : Took out transmit every packet pass
* Saved a few bytes in the ioctl handler
* Alan Cox : Network driver sets packet type before
* calling netif_rx. Saves a function
* call a packet.
* Alan Cox : Hashed net_bh()
* Richard Kooijman: Timestamp fixes.
* Alan Cox : Wrong field in SIOCGIFDSTADDR
* Alan Cox : Device lock protection.
* Alan Cox : Fixed nasty side effect of device close
* changes.
* Rudi Cilibrasi : Pass the right thing to
* set_mac_address()
* Dave Miller : 32bit quantity for the device lock to
* make it work out on a Sparc.
* Bjorn Ekwall : Added KERNELD hack.
* Alan Cox : Cleaned up the backlog initialise.
* Craig Metz : SIOCGIFCONF fix if space for under
* 1 device.
* Thomas Bogendoerfer : Return ENODEV for dev_open, if there
* is no device open function.
* Andi Kleen : Fix error reporting for SIOCGIFCONF
* Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
* Cyrus Durgin : Cleaned for KMOD
* Adam Sulmicki : Bug Fix : Network Device Unload
* A network device unload needs to purge
* the backlog queue.
* Paul Rusty Russell : SIOCSIFNAME
* Pekka Riikonen : Netdev boot-time settings code
* Andrew Morton : Make unregister_netdevice wait
* indefinitely on dev->refcnt
* J Hadi Salim : - Backlog queue sampling
* - netif_rx() feedback
*/
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/jump_label.h>
#include <net/flow_keys.h>
#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8
/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
*
* Why 16? Because with 16 the only overlap we get on a hash of the
* low nibble of the protocol value is RARP/SNAP/X.25.
*
* NOTE: That is no longer true with the addition of VLAN tags. Not
* sure which should go first, but I bet it won't make much
* difference if we are running VLANs. The good news is that
* this protocol won't be in the list unless compiled in, so
* the average user (w/out VLANs) will not be adversely affected.
* --BLG
*
* 0800 IP
* 8100 802.1Q VLAN
* 0001 802.3
* 0002 AX.25
* 0004 802.2
* 8035 RARP
* 0005 SNAP
* 0805 X.25
* 0806 ARP
* 8137 IPX
* 0009 Localtalk
* 86DD IPv6
*/
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly; /* Taps */
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
* semaphore.
*
* Pure readers hold dev_base_lock for reading, or rcu_read_lock()
*
* Writers must hold the rtnl semaphore while they loop through the
* dev_base_head list, and hold dev_base_lock for writing when they do the
* actual updates. This allows pure readers to access the list even
* while a writer is preparing to update it.
*
* To put it another way, dev_base_lock is held for writing only to
* protect against pure readers; the rtnl semaphore provides the
* protection against other writers.
*
* See, for example usages, register_netdevice() and
* unregister_netdevice(), which must be called with the rtnl
* semaphore held.
*/
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
static inline void dev_base_seq_inc(struct net *net)
{
while (++net->dev_base_seq == 0);
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
spin_lock(&sd->input_pkt_queue.lock);
#endif
}
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
struct net *net = dev_net(dev);
ASSERT_RTNL();
write_lock_bh(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock_bh(&dev_base_lock);
dev_base_seq_inc(net);
return 0;
}
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
static void unlist_netdevice(struct net_device *dev)
{
ASSERT_RTNL();
/* Unlink dev from the device chain */
write_lock_bh(&dev_base_lock);
list_del_rcu(&dev->dev_list);
hlist_del_rcu(&dev->name_hlist);
hlist_del_rcu(&dev->index_hlist);
write_unlock_bh(&dev_base_lock);
dev_base_seq_inc(dev_net(dev));
}
/*
* Our notifier list
*/
static RAW_NOTIFIER_HEAD(netdev_chain);
/*
* Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler.
*/
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
* according to dev->type
*/
static const unsigned short netdev_lock_type[] =
{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
ARPHRD_VOID, ARPHRD_NONE};
static const char *const netdev_lock_name[] =
{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
"_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
"_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
"_xmit_PHONET_PIPE", "_xmit_IEEE802154",
"_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
int i;
for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
if (netdev_lock_type[i] == dev_type)
return i;
/* the last key is used by default */
return ARRAY_SIZE(netdev_lock_type) - 1;
}
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
int i;
i = netdev_lock_pos(dev_type);
lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
netdev_lock_name[i]);
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
int i;
i = netdev_lock_pos(dev->type);
lockdep_set_class_and_name(&dev->addr_list_lock,
&netdev_addr_lock_key[i],
netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
Protocol management and registration routines
*******************************************************************************/
/*
* Add a protocol ID to the list. Now that the input handler is
* smarter we can dispense with all the messy stuff that used to be
* here.
*
* BEWARE!!! Protocol handlers, mangling input packets,
* MUST BE last in hash buckets and checking protocol handlers
* MUST start from promiscuous ptype_all chain in net_bh.
* It is true now, do not change it.
* Explanation follows: if a protocol handler that mangles packets
* is first on the list, it is not able to sense that the packet
* is cloned and should be copied-on-write, so it will
* change it and subsequent readers will get a broken packet.
* --ANK (980803)
*/
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
return &ptype_all;
else
return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
* dev_add_pack - add packet handler
* @pt: packet type declaration
*
* Add a protocol handler to the networking stack. The passed &packet_type
* is linked into kernel lists and may not be freed until it has been
* removed from the kernel lists.
*
* This call does not sleep, therefore it cannot
* guarantee that all CPUs in the middle of receiving packets
* will see the new packet type (until the next received packet).
*/
void dev_add_pack(struct packet_type *pt)
{
struct list_head *head = ptype_head(pt);
spin_lock(&ptype_lock);
list_add_rcu(&pt->list, head);
spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
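/* Illustrative usage sketch, not part of the original file: a module
 * registering a tap for all protocols. Names such as my_tap_rcv and
 * my_tap are hypothetical.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// each tap gets its own clone; consume it
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// ETH_P_ALL goes on the ptype_all list
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);	// sleeps in synchronize_net()
 */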
/**
* __dev_remove_pack - remove packet handler
* @pt: packet type declaration
*
* Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function
* returns.
*
* The packet type might still be in use by receivers
* and must not be freed until after all the CPUs have gone
* through a quiescent state.
*/
void __dev_remove_pack(struct packet_type *pt)
{
struct list_head *head = ptype_head(pt);
struct packet_type *pt1;
spin_lock(&ptype_lock);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
list_del_rcu(&pt->list);
goto out;
}
}
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
* dev_remove_pack - remove packet handler
* @pt: packet type declaration
*
* Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function
* returns.
*
* This call sleeps to guarantee that no CPU is looking at the packet
* type after return.
*/
void dev_remove_pack(struct packet_type *pt)
{
__dev_remove_pack(pt);
synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************
Device Boot-time Settings Routines
*******************************************************************************/
/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
* netdev_boot_setup_add - add new setup entry
* @name: name of the device
* @map: configured settings for the device
*
* Adds new setup entry to the dev_boot_setup list. The function
* returns 0 on error and 1 on success. This is a generic routine for
* all netdevices.
*/
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
struct netdev_boot_setup *s;
int i;
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
strlcpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
}
return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
* netdev_boot_setup_check - check boot time settings
* @dev: the netdevice
*
* Check boot time settings for the device.
* The found settings are set for the device to be used
* later in the device probing.
* Returns 0 if no settings found, 1 if they are.
*/
int netdev_boot_setup_check(struct net_device *dev)
{
struct netdev_boot_setup *s = dev_boot_setup;
int i;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
!strcmp(dev->name, s[i].name)) {
dev->irq = s[i].map.irq;
dev->base_addr = s[i].map.base_addr;
dev->mem_start = s[i].map.mem_start;
dev->mem_end = s[i].map.mem_end;
return 1;
}
}
return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
* netdev_boot_base - get address from boot time settings
* @prefix: prefix for network device
* @unit: id for network device
*
* Check boot time settings for the base address of device.
* The found settings are set for the device to be used
* later in the device probing.
* Returns 0 if no settings found.
*/
unsigned long netdev_boot_base(const char *prefix, int unit)
{
const struct netdev_boot_setup *s = dev_boot_setup;
char name[IFNAMSIZ];
int i;
sprintf(name, "%s%d", prefix, unit);
/*
* If the device is already registered, return a base of 1
* to indicate not to probe for this interface
*/
if (__dev_get_by_name(&init_net, name))
return 1;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
if (!strcmp(name, s[i].name))
return s[i].map.base_addr;
return 0;
}
/*
* Saves at boot time configured settings for any netdevice.
*/
int __init netdev_boot_setup(char *str)
{
int ints[5];
struct ifmap map;
str = get_options(str, ARRAY_SIZE(ints), ints);
if (!str || !*str)
return 0;
/* Save settings */
memset(&map, 0, sizeof(map));
if (ints[0] > 0)
map.irq = ints[1];
if (ints[0] > 1)
map.base_addr = ints[2];
if (ints[0] > 2)
map.mem_start = ints[3];
if (ints[0] > 3)
map.mem_end = ints[4];
/* Add new entry to the list */
return netdev_boot_setup_add(str, &map);
}
__setup("netdev=", netdev_boot_setup);
/*******************************************************************************
Device Interface Subroutines
*******************************************************************************/
/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name. Must be called under RTNL semaphore
* or @dev_base_lock. If the name is found a pointer to the device
* is returned. If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_name_hash(net, name);
hlist_for_each_entry(dev, p, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
* dev_get_by_name_rcu - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name.
* If the name is found a pointer to the device is returned.
* If the name is not found then %NULL is returned.
* The reference counters are not incremented so the caller must be
* careful with locks. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_name_hash(net, name);
hlist_for_each_entry_rcu(dev, p, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
* dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
*
* Find an interface by name. This can be called from any
* context and does its own locking. The returned handle has
* the usage count incremented and the caller must use dev_put() to
* release it when it is no longer needed. %NULL is returned if no
* matching device is found.
*/
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
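/* Illustrative usage sketch, not part of the original file: the two
 * lookup flavours differ only in lifetime guarantees.
 *
 *	struct net_device *dev;
 *
 *	// Refcounted lookup: may be called from any context; the caller
 *	// must balance it with dev_put() when done with the pointer.
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 *
 *	// RCU lookup: no reference is taken, so the pointer is valid
 *	// only inside the read-side critical section.
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "eth0");
 *	if (dev)
 *		...		// use dev; do not sleep or cache the pointer
 *	rcu_read_unlock();
 */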
/**
* __dev_get_by_index - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns a pointer to the device,
* or %NULL if it is not found. The device has not
* had its reference counter increased so the caller must be careful
* about locking. The caller must hold either the RTNL semaphore
* or @dev_base_lock.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
hlist_for_each_entry(dev, p, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
* dev_get_by_index_rcu - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns a pointer to the device,
* or %NULL if it is not found. The device has not
* had its reference counter increased so the caller must be careful
* about locking. The caller must hold RCU lock.
*/
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
struct hlist_node *p;
struct net_device *dev;
struct hlist_head *head = dev_index_hash(net, ifindex);
hlist_for_each_entry_rcu(dev, p, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
* dev_get_by_index - find a device by its ifindex
* @net: the applicable net namespace
* @ifindex: index of device
*
* Search for an interface by index. Returns a pointer to the device,
* or NULL if it is not found. The device returned has
* had a reference added and the pointer is safe until the user calls
* dev_put to indicate they have finished with it.
*/
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
* dev_getbyhwaddr_rcu - find a device by its hardware address
* @net: the applicable net namespace
* @type: media type of device
* @ha: hardware address
*
* Search for an interface by MAC address. Returns a pointer to the
* device, or NULL if it is not found.
* The caller must hold RCU or RTNL.
* The returned device has not had its ref count increased
* and the caller must therefore be careful about locking
*
*/
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *ha)
{
struct net_device *dev;
for_each_netdev_rcu(net, dev)
if (dev->type == type &&
!memcmp(dev->dev_addr, ha, dev->addr_len))
return dev;
return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev;
ASSERT_RTNL();
for_each_netdev(net, dev)
if (dev->type == type)
return dev;
return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
struct net_device *dev, *ret = NULL;
rcu_read_lock();
for_each_netdev_rcu(net, dev)
if (dev->type == type) {
dev_hold(dev);
ret = dev;
break;
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
* dev_get_by_flags_rcu - find any device with given flags
* @net: the applicable net namespace
* @if_flags: IFF_* values
* @mask: bitmask of bits in if_flags to check
*
* Search for any interface with the given flags. Returns a pointer to
* the device, or NULL if none is found. Must be called inside
* rcu_read_lock(), and the result's refcount is unchanged.
*/
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
unsigned short mask)
{
struct net_device *dev, *ret;
ret = NULL;
for_each_netdev_rcu(net, dev) {
if (((dev->flags ^ if_flags) & mask) == 0) {
ret = dev;
break;
}
}
return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
* dev_valid_name - check if name is okay for network device
* @name: name string
*
* Network device names need to be valid file names to
* allow sysfs to work. We also disallow any kind of
* whitespace.
*/
int dev_valid_name(const char *name)
{
if (*name == '\0')
return 0;
if (strlen(name) >= IFNAMSIZ)
return 0;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return 0;
while (*name) {
if (*name == '/' || isspace(*name))
return 0;
name++;
}
return 1;
}
EXPORT_SYMBOL(dev_valid_name);
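/* For illustration: "eth0", "br-lan" and "wlan_sta" all pass the checks
 * above, while "", ".", "..", "a/b", "my if" (embedded whitespace) and
 * any name of IFNAMSIZ or more characters are rejected.
 */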
/**
* __dev_alloc_name - allocate a name for a device
* @net: network namespace to allocate the device name in
* @name: name format string
* @buf: scratch buffer and result name string
*
* Passed a format string - eg "lt%d" - it will try to find a suitable
* id. It scans the list of devices to build up a free map, then chooses
* the first empty slot. The caller must hold the dev_base or rtnl lock
* while allocating the name and adding the device in order to avoid
* duplicates.
* Limited to bits_per_byte * page size devices (ie 32K on most platforms).
* Returns the number of the unit assigned or a negative errno code.
*/
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
int i = 0;
const char *p;
const int max_netdevices = 8*PAGE_SIZE;
unsigned long *inuse;
struct net_device *d;
p = strnchr(name, IFNAMSIZ-1, '%');
if (p) {
/*
* Verify the string as this thing may have come from
* the user. There must be either one "%d" and no other "%"
* characters.
*/
if (p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
/* Use one page as a bit array of possible slots */
inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
if (!inuse)
return -ENOMEM;
for_each_netdev(net, d) {
if (!sscanf(d->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
continue;
/* avoid cases where sscanf is not exact inverse of printf */
snprintf(buf, IFNAMSIZ, name, i);
if (!strncmp(buf, d->name, IFNAMSIZ))
set_bit(i, inuse);
}
i = find_first_zero_bit(inuse, max_netdevices);
free_page((unsigned long) inuse);
}
if (buf != name)
snprintf(buf, IFNAMSIZ, name, i);
if (!__dev_get_by_name(net, buf))
return i;
/* It is possible to run out of possible slots
* when the name is long and there isn't enough space left
* for the digits, or if all bits are used.
*/
return -ENFILE;
}
/**
* dev_alloc_name - allocate a name for a device
* @dev: device
* @name: name format string
*
* Passed a format string - eg "lt%d" - it will try to find a suitable
* id. It scans the list of devices to build up a free map, then chooses
* the first empty slot. The caller must hold the dev_base or rtnl lock
* while allocating the name and adding the device in order to avoid
* duplicates.
* Limited to bits_per_byte * page size devices (ie 32K on most platforms).
* Returns the number of the unit assigned or a negative errno code.
*/
int dev_alloc_name(struct net_device *dev, const char *name)
{
char buf[IFNAMSIZ];
struct net *net;
int ret;
BUG_ON(!dev_net(dev));
net = dev_net(dev);
ret = __dev_alloc_name(net, name, buf);
if (ret >= 0)
strlcpy(dev->name, buf, IFNAMSIZ);
return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
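/* Illustrative sketch, hypothetical driver code: passing a format string
 * lets the core pick the first free unit number.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto out;	// -EINVAL, -ENOMEM or -ENFILE
 *	// dev->name is now e.g. "dummy0" and err holds the unit number
 */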
static int dev_get_valid_name(struct net_device *dev, const char *name)
{
struct net *net;
BUG_ON(!dev_net(dev));
net = dev_net(dev);
if (!dev_valid_name(name))
return -EINVAL;
if (strchr(name, '%'))
return dev_alloc_name(dev, name);
else if (__dev_get_by_name(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
return 0;
}
/**
* dev_change_name - change name of a device
* @dev: device
* @newname: name (or format string) must be at least IFNAMSIZ
*
* Change the name of a device. A format string such as "eth%d"
* can be passed for wildcarding.
*/
int dev_change_name(struct net_device *dev, const char *newname)
{
char oldname[IFNAMSIZ];
int err = 0;
int ret;
struct net *net;
ASSERT_RTNL();
BUG_ON(!dev_net(dev));
net = dev_net(dev);
if (dev->flags & IFF_UP)
return -EBUSY;
if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
return 0;
memcpy(oldname, dev->name, IFNAMSIZ);
err = dev_get_valid_name(dev, newname);
if (err < 0)
return err;
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
return ret;
}
write_lock_bh(&dev_base_lock);
hlist_del_rcu(&dev->name_hlist);
write_unlock_bh(&dev_base_lock);
synchronize_rcu();
write_lock_bh(&dev_base_lock);
hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
write_unlock_bh(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
if (ret) {
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
printk(KERN_ERR
"%s: name change rollback failed: %d.\n",
dev->name, ret);
}
}
return err;
}
/**
* dev_set_alias - change ifalias of a device
* @dev: device
* @alias: name up to IFALIASZ
* @len: limit of bytes to copy from info
*
* Set ifalias for a device.
*/
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
ASSERT_RTNL();
if (len >= IFALIASZ)
return -EINVAL;
if (!len) {
if (dev->ifalias) {
kfree(dev->ifalias);
dev->ifalias = NULL;
}
return 0;
}
dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
if (!dev->ifalias)
return -ENOMEM;
strlcpy(dev->ifalias, alias, len+1);
return len;
}
/**
* netdev_features_change - device changes features
* @dev: device to cause notification
*
* Called to indicate a device has changed features.
*/
void netdev_features_change(struct net_device *dev)
{
call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
* netdev_state_change - device changes state
* @dev: device to cause notification
*
* Called to indicate a device has changed state. This function calls
* the notifier chains for netdev_chain and sends a NEWLINK message
* to the routing socket.
*/
void netdev_state_change(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
call_netdevice_notifiers(NETDEV_CHANGE, dev);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
}
}
EXPORT_SYMBOL(netdev_state_change);
int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
* dev_load - load a network module
* @net: the applicable net namespace
* @name: name of interface
*
* If a network interface is not present and the process has suitable
* privileges this function loads the module. If module loading is not
* available in this kernel then it becomes a nop.
*/
void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
int no_module;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
rcu_read_unlock();
no_module = !dev;
if (no_module && capable(CAP_NET_ADMIN))
no_module = request_module("netdev-%s", name);
if (no_module && capable(CAP_SYS_MODULE)) {
if (!request_module("%s", name))
pr_err("Loading kernel module for a network device "
"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
"instead\n", name);
}
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
int ret;
ASSERT_RTNL();
if (!netif_device_present(dev))
return -ENODEV;
ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
ret = notifier_to_errno(ret);
if (ret)
return ret;
set_bit(__LINK_STATE_START, &dev->state);
if (ops->ndo_validate_addr)
ret = ops->ndo_validate_addr(dev);
if (!ret && ops->ndo_open)
ret = ops->ndo_open(dev);
if (ret)
clear_bit(__LINK_STATE_START, &dev->state);
else {
dev->flags |= IFF_UP;
net_dmaengine_get();
dev_set_rx_mode(dev);
dev_activate(dev);
}
return ret;
}
/**
* dev_open - prepare an interface for use.
* @dev: device to open
*
* Takes a device from down to up state. The device's private open
* function is invoked and then the multicast lists are loaded. Finally
* the device is moved into the up state and a %NETDEV_UP message is
* sent to the netdev notifier chain.
*
* Calling this function on an active interface is a nop. On a failure
* a negative errno code is returned.
*/
int dev_open(struct net_device *dev)
{
int ret;
if (dev->flags & IFF_UP)
return 0;
ret = __dev_open(dev);
if (ret < 0)
return ret;
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_UP, dev);
return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close_many(struct list_head *head)
{
struct net_device *dev;
ASSERT_RTNL();
might_sleep();
list_for_each_entry(dev, head, unreg_list) {
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
clear_bit(__LINK_STATE_START, &dev->state);
/* Synchronize to scheduled poll. We cannot touch poll list, it
* can be even on different cpu. So just clear netif_running().
*
* dev->stop() will invoke napi_disable() on all of its
* napi_struct instances on this device.
*/
smp_mb__after_clear_bit(); /* Commit netif_running(). */
}
dev_deactivate_many(head);
list_for_each_entry(dev, head, unreg_list) {
const struct net_device_ops *ops = dev->netdev_ops;
/*
* Call the device-specific close. This cannot fail
* and is only done if the device is UP.
*
* We allow it to be called even after a DETACH hot-plug
* event.
*/
if (ops->ndo_stop)
ops->ndo_stop(dev);
dev->flags &= ~IFF_UP;
net_dmaengine_put();
}
return 0;
}
static int __dev_close(struct net_device *dev)
{
int retval;
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
retval = __dev_close_many(&single);
list_del(&single);
return retval;
}
static int dev_close_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(tmp_list);
list_for_each_entry_safe(dev, tmp, head, unreg_list)
if (!(dev->flags & IFF_UP))
list_move(&dev->unreg_list, &tmp_list);
__dev_close_many(head);
list_for_each_entry(dev, head, unreg_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_DOWN, dev);
}
/* rollback_registered_many needs the complete original list */
list_splice(&tmp_list, head);
return 0;
}
/**
* dev_close - shutdown an interface.
* @dev: device to shutdown
*
* This function moves an active device into down state. A
* %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
* is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
* chain.
*/
int dev_close(struct net_device *dev)
{
if (dev->flags & IFF_UP) {
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
dev_close_many(&single);
list_del(&single);
}
return 0;
}
EXPORT_SYMBOL(dev_close);
/**
* dev_disable_lro - disable Large Receive Offload on a device
* @dev: device
*
* Disable Large Receive Offload (LRO) on a net device. Must be
* called under RTNL. This is needed if received packets may be
* forwarded to another interface.
*/
void dev_disable_lro(struct net_device *dev)
{
/*
* If we're trying to disable lro on a vlan device
* use the underlying physical device instead
*/
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
dev->wanted_features &= ~NETIF_F_LRO;
netdev_update_features(dev);
if (unlikely(dev->features & NETIF_F_LRO))
netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;
/**
* register_netdevice_notifier - register a network notifier block
* @nb: notifier
*
* Register a notifier to be called when network device events occur.
* The notifier passed is linked into the kernel structures and must
* not be reused until it has been unregistered. A negative errno code
* is returned on a failure.
*
* When registered, all registration and up events are replayed
* to the new notifier to allow it to have a race-free
* view of the network device list.
*/
int register_netdevice_notifier(struct notifier_block *nb)
{
struct net_device *dev;
struct net_device *last;
struct net *net;
int err;
rtnl_lock();
err = raw_notifier_chain_register(&netdev_chain, nb);
if (err)
goto unlock;
if (dev_boot_phase)
goto unlock;
for_each_net(net) {
for_each_netdev(net, dev) {
err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
err = notifier_to_errno(err);
if (err)
goto rollback;
if (!(dev->flags & IFF_UP))
continue;
nb->notifier_call(nb, NETDEV_UP, dev);
}
}
unlock:
rtnl_unlock();
return err;
rollback:
last = dev;
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev == last)
goto outroll;
if (dev->flags & IFF_UP) {
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
nb->notifier_call(nb, NETDEV_DOWN, dev);
}
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
}
}
outroll:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
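/* Illustrative sketch, not part of the original file: a minimal notifier
 * that logs up/down transitions. my_netdev_event and my_netdev_nb are
 * hypothetical names. Note that in this kernel the void pointer passed
 * to the callback is the struct net_device itself.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_DOWN:
 *			pr_info("%s is down\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */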
/**
* unregister_netdevice_notifier - unregister a network notifier block
* @nb: notifier
*
* Unregister a notifier previously registered by
* register_netdevice_notifier(). The notifier is unlinked from the
* kernel structures and may then be reused. A negative errno code
* is returned on a failure.
*/
int unregister_netdevice_notifier(struct notifier_block *nb)
{
int err;
rtnl_lock();
err = raw_notifier_chain_unregister(&netdev_chain, nb);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
* call_netdevice_notifiers - call all network notifier blocks
* @val: value passed unmodified to notifier function
* @dev: net_device pointer passed unmodified to notifier function
*
* Call all network notifier blocks. Parameters and return value
* are as for raw_notifier_call_chain().
*/
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
ASSERT_RTNL();
return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct jump_label_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call jump_label_dec() from irq context
* If net_disable_timestamp() is called from irq context, defer the
* jump_label_dec() calls.
*/
static atomic_t netstamp_needed_deferred;
#endif
void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
if (deferred) {
while (--deferred)
jump_label_dec(&netstamp_needed);
return;
}
#endif
WARN_ON(in_interrupt());
jump_label_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
if (in_interrupt()) {
atomic_inc(&netstamp_needed_deferred);
return;
}
#endif
jump_label_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
skb->tstamp.tv64 = 0;
if (static_branch(&netstamp_needed))
__net_timestamp(skb);
}
#define net_timestamp_check(COND, SKB) \
if (static_branch(&netstamp_needed)) { \
if ((COND) && !(SKB)->tstamp.tv64) \
__net_timestamp(SKB); \
} \
static int net_hwtstamp_validate(struct ifreq *ifr)
{
struct hwtstamp_config cfg;
enum hwtstamp_tx_types tx_type;
enum hwtstamp_rx_filters rx_filter;
int tx_type_valid = 0;
int rx_filter_valid = 0;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
if (cfg.flags) /* reserved for future extensions */
return -EINVAL;
tx_type = cfg.tx_type;
rx_filter = cfg.rx_filter;
switch (tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
tx_type_valid = 1;
break;
}
switch (rx_filter) {
case HWTSTAMP_FILTER_NONE:
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
rx_filter_valid = 1;
break;
}
if (!tx_type_valid || !rx_filter_valid)
return -ERANGE;
return 0;
}
static inline bool is_skb_forwardable(struct net_device *dev,
struct sk_buff *skb)
{
unsigned int len;
if (!(dev->flags & IFF_UP))
return false;
len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
if (skb->len <= len)
return true;
/* if TSO is enabled, we don't care about the length as the packet
* could be forwarded without being segmented before
*/
if (skb_is_gso(skb))
return true;
return false;
}
/**
* dev_forward_skb - loopback an skb to another netif
*
* @dev: destination network device
* @skb: buffer to forward
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped, but freed)
*
* dev_forward_skb can be used for injecting an skb from the
* start_xmit function of one device into the receive queue
* of another device.
*
* The receiving device may be in another namespace, so
* we have to clear all information in the skb that could
* impact namespace isolation.
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
}
skb_orphan(skb);
nf_reset(skb);
if (unlikely(!is_skb_forwardable(dev, skb))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
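/* Illustrative sketch, hypothetical veth-style driver: implementing
 * start_xmit by injecting the skb into a peer device's receive path.
 * my_xmit and my_get_peer are made-up names.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);	// driver-private
 *		unsigned int len = skb->len;	// read before the skb is handed off
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS) {
 *			dev->stats.tx_packets++;
 *			dev->stats.tx_bytes += len;
 *		}
 *		return NETDEV_TX_OK;	// skb is consumed either way
 *	}
 */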
static inline int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{
atomic_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
*/
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
struct sk_buff *skb2 = NULL;
struct packet_type *pt_prev = NULL;
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
/* Never send packets back to the socket
* they originated from - MvS (miquels@drinkel.ow.org)
*/
if ((ptype->dev == dev || !ptype->dev) &&
(ptype->af_packet_priv == NULL ||
(struct sock *)ptype->af_packet_priv != skb->sk)) {
if (pt_prev) {
deliver_skb(skb2, pt_prev, skb->dev);
pt_prev = ptype;
continue;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
break;
net_timestamp_set(skb2);
/* skb->nh should be correctly
set by sender, so that the second statement is
just protection against buggy protocols.
*/
skb_reset_mac_header(skb2);
if (skb_network_header(skb2) < skb2->data ||
skb2->network_header > skb2->tail) {
if (net_ratelimit())
printk(KERN_CRIT "protocol %04x is "
"buggy, dev %s\n",
ntohs(skb2->protocol),
dev->name);
skb_reset_network_header(skb2);
}
skb2->transport_header = skb2->network_header;
skb2->pkt_type = PACKET_OUTGOING;
pt_prev = ptype;
}
}
if (pt_prev)
pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
rcu_read_unlock();
}
/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
* @dev: Network device
* @txq: number of queues available
*
* If real_num_tx_queues is changed the tc mappings may no longer be
* valid. To resolve this verify the tc mapping remains valid and if
* not, NULL the mapping. With no priorities mapping to this
* offset/count pair it will no longer be used. In the worst case TC0
* is invalid and nothing can be done, so disable priority mappings. It is
* expected that drivers will fix this mapping if they can before
* calling netif_set_real_num_tx_queues.
*/
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
int i;
struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
pr_warning("Number of in use tx queues changed "
"invalidating tc mappings. Priority "
"traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
/* Invalidated prio to tc mappings set to TC0 */
for (i = 1; i < TC_BITMASK + 1; i++) {
int q = netdev_get_prio_tc_map(dev, i);
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
pr_warning("Number of in use tx queues "
"changed. Priority %i to tc "
"mapping %i is no longer valid "
"setting map to 0\n",
i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
}
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
int rc;
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED ||
dev->reg_state == NETREG_UNREGISTERING) {
ASSERT_RTNL();
rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
txq);
if (rc)
return rc;
if (dev->num_tc)
netif_setup_tc(dev, txq);
if (txq < dev->real_num_tx_queues)
qdisc_reset_all_tx_gt(dev, txq);
}
dev->real_num_tx_queues = txq;
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
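/* Illustrative sketch, hypothetical driver: resizing the active queue set
 * after renegotiating channels with firmware. new_txq and new_rxq are
 * made-up variables; RTNL is required once the device is registered.
 * (netif_set_real_num_rx_queues is defined just below.)
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, new_rxq);
 *	rtnl_unlock();
 */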
#ifdef CONFIG_RPS
/**
* netif_set_real_num_rx_queues - set actual number of RX queues used
* @dev: Network device
* @rxq: Actual number of RX queues
*
* This must be called either with the rtnl_lock held or before
* registration of the net device. Returns 0 on success, or a
* negative error code. If called before registration, it always
* succeeds.
*/
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
int rc;
if (rxq < 1 || rxq > dev->num_rx_queues)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED) {
ASSERT_RTNL();
rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
rxq);
if (rc)
return rc;
}
dev->real_num_rx_queues = rxq;
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
q->next_sched = NULL;
*sd->output_queue_tailp = q;
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
void __netif_schedule(struct Qdisc *q)
{
if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
if (atomic_dec_and_test(&skb->users)) {
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
skb->next = sd->completion_queue;
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
void dev_kfree_skb_any(struct sk_buff *skb)
{
if (in_irq() || irqs_disabled())
dev_kfree_skb_irq(skb);
else
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
* netif_device_detach - mark device as removed
* @dev: network device
*
* Mark device as removed from system and therefore no longer available.
*/
void netif_device_detach(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_stop_all_queues(dev);
}
}
EXPORT_SYMBOL(netif_device_detach);
/**
* netif_device_attach - mark device as attached
* @dev: network device
*
* Mark device as attached to the system and restart if needed.
*/
void netif_device_attach(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
netif_tx_wake_all_queues(dev);
__netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_device_attach);
/**
* skb_dev_set -- assign a new device to a buffer
* @skb: buffer for the new device
* @dev: network device
*
* If an skb is owned by a device already, we have to reset
* all data private to the namespace a device belongs to
* before assigning it a new device.
*/
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
skb_dst_drop(skb);
if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
secpath_reset(skb);
nf_reset(skb);
skb_init_secmark(skb);
skb->mark = 0;
skb->priority = 0;
skb->nf_trace = 0;
skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
#endif
}
skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
const char *driver = "";
if (dev && dev->dev.parent)
driver = dev_driver_string(dev->dev.parent);
WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
"gso_type=%d ip_summed=%d\n",
driver, dev ? &dev->features : NULL,
skb->sk ? &skb->sk->sk_route_caps : NULL,
skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
* Invalidate hardware checksum when packet is to be mangled, and
* complete checksum manually on outgoing path.
*/
int skb_checksum_help(struct sk_buff *skb)
{
__wsum csum;
int ret = 0, offset;
if (skb->ip_summed == CHECKSUM_COMPLETE)
goto out_set_summed;
if (unlikely(skb_shinfo(skb)->gso_size)) {
skb_warn_bad_offload(skb);
return -EINVAL;
}
offset = skb_checksum_start_offset(skb);
BUG_ON(offset >= skb_headlen(skb));
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
if (skb_cloned(skb) &&
!skb_clone_writable(skb, offset + sizeof(__sum16))) {
ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (ret)
goto out;
}
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
* skb_gso_segment - Perform segmentation on skb.
* @skb: buffer to segment
* @features: features for the output path (see dev->features)
*
* This function segments the given skb and returns a list of segments.
*
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
__be16 type = skb->protocol;
int vlan_depth = ETH_HLEN;
int err;
while (type == htons(ETH_P_8021Q)) {
struct vlan_hdr *vh;
if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
return ERR_PTR(-EINVAL);
vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
}
skb_reset_mac_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
__skb_pull(skb, skb->mac_len);
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
skb_warn_bad_offload(skb);
if (skb_header_cloned(skb) &&
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
return ERR_PTR(err);
}
rcu_read_lock();
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
err = ptype->gso_send_check(skb);
segs = ERR_PTR(err);
if (err || skb_gso_ok(skb, features))
break;
__skb_push(skb, (skb->data -
skb_network_header(skb)));
}
segs = ptype->gso_segment(skb, features);
break;
}
}
rcu_read_unlock();
__skb_push(skb, skb->data - skb_mac_header(skb));
return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
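/* Usage sketch (illustrative only; consume_segment() is a hypothetical
 * consumer). Callers walk the singly linked list returned via skb->next:
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *nskb = segs;
 *
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		consume_segment(nskb);
 *	}
 */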
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
if (net_ratelimit()) {
printk(KERN_ERR "%s: hw csum failure.\n",
dev ? dev->name : "<unknown>");
dump_stack();
}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
* 1. IOMMU is present and allows mapping all the memory.
* 2. No high memory really exists on this machine.
*/
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (PageHighMem(skb_frag_page(frag)))
return 1;
}
}
if (PCI_DMA_BUS_IS_PHYS) {
struct device *pdev = dev->dev.parent;
if (!pdev)
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr = page_to_phys(skb_frag_page(frag));
if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
return 1;
}
}
#endif
return 0;
}
struct dev_gso_cb {
void (*destructor)(struct sk_buff *skb);
};
#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
static void dev_gso_skb_destructor(struct sk_buff *skb)
{
struct dev_gso_cb *cb;
do {
struct sk_buff *nskb = skb->next;
skb->next = nskb->next;
nskb->next = NULL;
kfree_skb(nskb);
} while (skb->next);
cb = DEV_GSO_CB(skb);
if (cb->destructor)
cb->destructor(skb);
}
/**
* dev_gso_segment - Perform emulated hardware segmentation on skb.
* @skb: buffer to segment
* @features: device features as applicable to this skb
*
* This function segments the given skb and stores the list of segments
* in skb->next.
*/
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
/* Verifying header integrity only. */
if (!segs)
return 0;
if (IS_ERR(segs))
return PTR_ERR(segs);
skb->next = segs;
DEV_GSO_CB(skb)->destructor = skb->destructor;
skb->destructor = dev_gso_skb_destructor;
return 0;
}
/*
* Try to orphan skb early, right before transmission by the device.
* We cannot orphan skb if tx timestamp is requested or the sk-reference
* is needed at the driver level for other reasons, e.g. see net/can/raw.c
*/
static inline void skb_orphan_try(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
if (sk && !skb_shinfo(skb)->tx_flags) {
/* skb_tx_hash() won't be able to get sk.
* We copy sk_hash into skb->rxhash
*/
if (!skb->rxhash)
skb->rxhash = sk->sk_hash;
skb_orphan(skb);
}
}
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
return ((features & NETIF_F_GEN_CSUM) ||
((features & NETIF_F_V4_CSUM) &&
protocol == htons(ETH_P_IP)) ||
((features & NETIF_F_V6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
__be16 protocol, netdev_features_t features)
{
if (!can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
features &= ~NETIF_F_SG;
} else if (illegal_highdma(skb->dev, skb)) {
features &= ~NETIF_F_SG;
}
return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
netdev_features_t features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) {
return harmonize_features(skb, protocol, features);
}
features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
if (protocol != htons(ETH_P_8021Q)) {
return harmonize_features(skb, protocol, features);
} else {
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
return harmonize_features(skb, protocol, features);
}
}
EXPORT_SYMBOL(netif_skb_features);
/*
* Returns true if either:
* 1. skb has frag_list and the device doesn't support FRAGLIST, or
* 2. skb is fragmented and the device does not support SG, or if
* at least one of the fragments is in highmem and the device does not
* support DMA from it.
*/
static inline int skb_needs_linearize(struct sk_buff *skb,
int features)
{
return skb_is_nonlinear(skb) &&
((skb_has_frag_list(skb) &&
!(features & NETIF_F_FRAGLIST)) ||
(skb_shinfo(skb)->nr_frags &&
!(features & NETIF_F_SG)));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
unsigned int skb_len;
if (likely(!skb->next)) {
netdev_features_t features;
/*
* If device doesn't need skb->dst, release it right now while
* it's hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
if (!list_empty(&ptype_all))
dev_queue_xmit_nit(skb, dev);
skb_orphan_try(skb);
features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) &&
!(features & NETIF_F_HW_VLAN_TX)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
if (unlikely(!skb))
goto out;
skb->vlan_tci = 0;
}
if (netif_needs_gso(skb, features)) {
if (unlikely(dev_gso_segment(skb, features)))
goto out_kfree_skb;
if (skb->next)
goto gso;
} else {
if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
goto out_kfree_skb;
/* If packet is not checksummed and device does not
* support checksumming for this protocol, complete
* checksumming here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
if (!(features & NETIF_F_ALL_CSUM) &&
skb_checksum_help(skb))
goto out_kfree_skb;
}
}
skb_len = skb->len;
rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc, dev, skb_len);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
}
gso:
do {
struct sk_buff *nskb = skb->next;
skb->next = nskb->next;
nskb->next = NULL;
/*
* If device doesn't need nskb->dst, release it right now while
* it's hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
skb_len = nskb->len;
rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
nskb->next = skb->next;
skb->next = nskb;
return rc;
}
txq_trans_update(txq);
if (unlikely(netif_xmit_stopped(txq) && skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
out_kfree_gso_skb:
if (likely(skb->next == NULL))
skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
kfree_skb(skb);
out:
return rc;
}
static u32 hashrnd __read_mostly;
/*
* Returns a Tx hash based on the given packet descriptor and the number
* of Tx queues to be used as a distribution range.
*/
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
unsigned int num_tx_queues)
{
u32 hash;
u16 qoffset = 0;
u16 qcount = num_tx_queues;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
while (unlikely(hash >= num_tx_queues))
hash -= num_tx_queues;
return hash;
}
if (dev->num_tc) {
u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
qoffset = dev->tc_to_txq[tc].offset;
qcount = dev->tc_to_txq[tc].count;
}
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
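/* Note on the final mapping above: ((u64) hash * qcount) >> 32 scales a
 * 32-bit hash uniformly into [0, qcount) without a division. For example,
 * with qcount = 8 and hash = 0x80000000, (0x80000000ULL * 8) >> 32 = 4:
 * a hash halfway through the 32-bit range lands halfway through the
 * queue range.
 */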
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
if (unlikely(queue_index >= dev->real_num_tx_queues)) {
if (net_ratelimit()) {
pr_warning("%s selects TX queue %d, but "
"real number of TX queues is %d\n",
dev->name, queue_index, dev->real_num_tx_queues);
}
return 0;
}
return queue_index;
}
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
struct xps_dev_maps *dev_maps;
struct xps_map *map;
int queue_index = -1;
rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_maps);
if (dev_maps) {
map = rcu_dereference(
dev_maps->cpu_map[raw_smp_processor_id()]);
if (map) {
if (map->len == 1)
queue_index = map->queues[0];
else {
u32 hash;
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^
skb->rxhash;
hash = jhash_1word(hash, hashrnd);
queue_index = map->queues[
((u64)hash * map->len) >> 32];
}
if (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index = -1;
}
}
rcu_read_unlock();
return queue_index;
#else
return -1;
#endif
}
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
int queue_index;
const struct net_device_ops *ops = dev->netdev_ops;
if (dev->real_num_tx_queues == 1)
queue_index = 0;
else if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb);
queue_index = dev_cap_txqueue(dev, queue_index);
} else {
struct sock *sk = skb->sk;
queue_index = sk_tx_queue_get(sk);
if (queue_index < 0 || skb->ooo_okay ||
queue_index >= dev->real_num_tx_queues) {
int old_index = queue_index;
queue_index = get_xps_queue(dev, skb);
if (queue_index < 0)
queue_index = skb_tx_hash(dev, skb);
if (queue_index != old_index && sk) {
struct dst_entry *dst =
rcu_dereference_check(sk->sk_dst_cache, 1);
if (dst && skb_dst(skb) == dst)
sk_tx_queue_set(sk, queue_index);
}
}
}
skb_set_queue_mapping(skb, queue_index);
return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
bool contended;
int rc;
qdisc_skb_cb(skb)->pkt_len = skb->len;
qdisc_calculate_pkt_len(skb, q);
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
* This permits __QDISC_STATE_RUNNING owner to get the lock more often
* and dequeue packets faster.
*/
contended = qdisc_is_running(q);
if (unlikely(contended))
spin_lock(&q->busylock);
spin_lock(root_lock);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
kfree_skb(skb);
rc = NET_XMIT_DROP;
} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
qdisc_run_begin(q)) {
/*
* This is a work-conserving queue; there are no old skbs
* waiting to be sent out; and the qdisc is not running -
* xmit the skb directly.
*/
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
}
__qdisc_run(q);
} else
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
rc = q->enqueue(skb, q) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
}
__qdisc_run(q);
}
}
spin_unlock(root_lock);
if (unlikely(contended))
spin_unlock(&q->busylock);
return rc;
}
#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
if ((!skb->priority) && (skb->sk) && map)
skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
}
#else
#define skb_update_prio(skb)
#endif
static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
* this function. The function can be called from an interrupt.
*
* A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due
* to congestion or traffic shaping.
*
* -----------------------------------------------------------------------------------
* I notice this method can also return errors from the queue disciplines,
* including NET_XMIT_DROP, which is a positive value. So, errors can also
* be positive.
*
* Regardless of the return value, the skb is consumed, so it is currently
* difficult to retry a send to this method. (You can bump the ref count
* before sending to hold a reference for retry if you are careful.)
*
* When calling this method, interrupts MUST be enabled. This is because
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
int dev_queue_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
rcu_read_lock_bh();
skb_update_prio(skb);
txq = dev_pick_tx(dev, skb);
q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
trace_net_dev_queue(skb);
if (q->enqueue) {
rc = __dev_xmit_skb(skb, q, dev, txq);
goto out;
}
/* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels...
Really, it is unlikely that netif_tx_lock protection is necessary
here. (f.e. loopback and IP tunnels are clean ignoring statistics
counters.)
However, it is possible that they rely on protection
made by us here.
Check this and shoot the lock. It is not prone to deadlocks.
Either shoot the noqueue qdisc, it is even simpler 8)
*/
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
goto recursion_alert;
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
rc = dev_hard_start_xmit(skb, dev, txq);
__this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
}
}
HARD_TX_UNLOCK(dev, txq);
if (net_ratelimit())
printk(KERN_CRIT "Virtual device %s asks to "
"queue packet!\n", dev->name);
} else {
/* Recursion is detected! It is possible,
* unfortunately
*/
recursion_alert:
if (net_ratelimit())
printk(KERN_CRIT "Dead loop on virtual device "
"%s, fix it urgently!\n", dev->name);
}
}
rc = -ENETDOWN;
rcu_read_unlock_bh();
kfree_skb(skb);
return rc;
out:
rcu_read_unlock_bh();
return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
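/* Usage sketch (illustrative only). A typical caller sets the device and
 * priority, then hands the buffer off; the skb is consumed regardless of
 * the return value:
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	rc = dev_queue_xmit(skb);
 *	if (rc != NET_XMIT_SUCCESS)
 *		... the skb has still been consumed; do not free or retry it
 */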
/*=======================================================================
Receiver routines
=======================================================================*/
int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
struct napi_struct *napi)
{
list_add_tail(&napi->poll_list, &sd->poll_list);
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
/*
* __skb_get_rxhash: calculate a flow hash based on src/dst addresses
* and src/dst port numbers. Sets rxhash in skb to non-zero hash value
* on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
* if hash is a canonical 4-tuple hash over transport ports.
*/
void __skb_get_rxhash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
if (!skb_flow_dissect(skb, &keys))
return;
if (keys.ports) {
if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
swap(keys.port16[0], keys.port16[1]);
skb->l4_rxhash = 1;
}
/* get a consistent hash (same value on both flow directions) */
if ((__force u32)keys.dst < (__force u32)keys.src)
swap(keys.dst, keys.src);
hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src,
(__force u32)keys.ports, hashrnd);
if (!hash)
hash = 1;
skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
struct jump_label_key rps_needed __read_mostly;
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *old_rflow;
u32 flow_id;
u16 rxq_index;
int rc;
/* Should we steer this flow to a different hardware queue? */
if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
!(dev->features & NETIF_F_NTUPLE))
goto out;
rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
if (rxq_index == skb_get_rx_queue(skb))
goto out;
rxqueue = dev->_rx + rxq_index;
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (!flow_table)
goto out;
flow_id = skb->rxhash & flow_table->mask;
rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
rxq_index, flow_id);
if (rc < 0)
goto out;
old_rflow = rflow;
rflow = &flow_table->flows[flow_id];
rflow->filter = rc;
if (old_rflow->filter == rflow->filter)
old_rflow->filter = RPS_NO_FILTER;
out:
#endif
rflow->last_qtail =
per_cpu(softnet_data, next_cpu).input_queue_head;
}
rflow->cpu = next_cpu;
return rflow;
}
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
* rcu_read_lock must be held on entry.
*/
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
struct netdev_rx_queue *rxqueue;
struct rps_map *map;
struct rps_dev_flow_table *flow_table;
struct rps_sock_flow_table *sock_flow_table;
int cpu = -1;
u16 tcpu;
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->real_num_rx_queues)) {
WARN_ONCE(dev->real_num_rx_queues > 1,
"%s received packet on queue %u, but number "
"of RX queues is %u\n",
dev->name, index, dev->real_num_rx_queues);
goto done;
}
rxqueue = dev->_rx + index;
} else
rxqueue = dev->_rx;
map = rcu_dereference(rxqueue->rps_map);
if (map) {
if (map->len == 1 &&
!rcu_access_pointer(rxqueue->rps_flow_table)) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
goto done;
}
} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
goto done;
}
skb_reset_network_header(skb);
if (!skb_get_rxhash(skb))
goto done;
flow_table = rcu_dereference(rxqueue->rps_flow_table);
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
u16 next_cpu;
struct rps_dev_flow *rflow;
rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
tcpu = rflow->cpu;
next_cpu = sock_flow_table->ents[skb->rxhash &
sock_flow_table->mask];
/*
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
* - Current CPU is unset (equal to RPS_NO_CPU).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
* This guarantees that all previous packets for the flow
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0))
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
}
}
if (map) {
tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
if (cpu_online(tcpu)) {
cpu = tcpu;
goto done;
}
}
done:
return cpu;
}
#ifdef CONFIG_RFS_ACCEL
/**
* rps_may_expire_flow - check whether an RFS hardware filter may be removed
* @dev: Device on which the filter was set
* @rxq_index: RX queue index
* @flow_id: Flow ID passed to ndo_rx_flow_steer()
* @filter_id: Filter ID returned by ndo_rx_flow_steer()
*
* Drivers that implement ndo_rx_flow_steer() should periodically call
* this function for each installed filter and remove the filters for
* which it returns %true.
*/
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
u32 flow_id, u16 filter_id)
{
struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rflow;
bool expire = true;
int cpu;
rcu_read_lock();
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
cpu = ACCESS_ONCE(rflow->cpu);
if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
(int)(10 * flow_table->mask)))
expire = false;
}
rcu_read_unlock();
return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
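/* Usage sketch (illustrative only; my_filters[] and my_remove_filter() are
 * hypothetical driver state). A driver scans its installed steering
 * filters periodically and tears down the ones the stack no longer needs:
 *
 *	for (i = 0; i < nfilters; i++)
 *		if (rps_may_expire_flow(dev, my_filters[i].rxq_index,
 *					my_filters[i].flow_id, i))
 *			my_remove_filter(dev, i);
 */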
#endif /* CONFIG_RFS_ACCEL */
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
struct softnet_data *sd = data;
____napi_schedule(sd, &sd->backlog);
sd->received_rps++;
}
#endif /* CONFIG_RPS */
/*
* Check if this softnet_data structure belongs to another cpu.
* If yes, queue it to our IPI list and return 1
* If no, return 0
*/
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
struct softnet_data *mysd = &__get_cpu_var(softnet_data);
if (sd != mysd) {
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
return 1;
}
#endif /* CONFIG_RPS */
return 0;
}
/*
* enqueue_to_backlog is called to queue an skb to a per CPU backlog
* queue (may be a remote CPU queue).
*/
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
unsigned int *qtail)
{
struct softnet_data *sd;
unsigned long flags;
sd = &per_cpu(softnet_data, cpu);
local_irq_save(flags);
rps_lock(sd);
if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
rps_unlock(sd);
local_irq_restore(flags);
return NET_RX_SUCCESS;
}
/* Schedule NAPI for backlog device.
* We can use a non-atomic operation since we own the queue lock.
*/
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
if (!rps_ipi_queued(sd))
____napi_schedule(sd, &sd->backlog);
}
goto enqueue;
}
sd->dropped++;
rps_unlock(sd);
local_irq_restore(flags);
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
/**
* netif_rx - post buffer to the network code
* @skb: buffer to post
*
* This function receives a packet from a device driver and queues it for
* the upper (protocol) levels to process. It always succeeds. The buffer
* may be dropped during processing for congestion control or by the
* protocol layers.
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped)
*
*/
int netif_rx(struct sk_buff *skb)
{
int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
return NET_RX_DROP;
net_timestamp_check(netdev_tstamp_prequeue, skb);
trace_netif_rx(skb);
#ifdef CONFIG_RPS
if (static_branch(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
preempt_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu < 0)
cpu = smp_processor_id();
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
preempt_enable();
} else
#endif
{
unsigned int qtail;
ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
put_cpu();
}
return ret;
}
EXPORT_SYMBOL(netif_rx);
int netif_rx_ni(struct sk_buff *skb)
{
int err;
preempt_disable();
err = netif_rx(skb);
if (local_softirq_pending())
do_softirq();
preempt_enable();
return err;
}
EXPORT_SYMBOL(netif_rx_ni);
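/* Usage sketch (illustrative only; my_build_rx_skb() is hypothetical).
 * A non-NAPI driver posts each received buffer from its interrupt handler:
 *
 *	skb = my_build_rx_skb(ring);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * Callers running in process context (e.g. a tunnel decapsulating from a
 * workqueue) use netif_rx_ni() instead so pending softirqs get run.
 */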
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
if (sd->completion_queue) {
struct sk_buff *clist;
local_irq_disable();
clist = sd->completion_queue;
sd->completion_queue = NULL;
local_irq_enable();
while (clist) {
struct sk_buff *skb = clist;
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
trace_kfree_skb(skb, net_tx_action);
__kfree_skb(skb);
}
}
if (sd->output_queue) {
struct Qdisc *head;
local_irq_disable();
head = sd->output_queue;
sd->output_queue = NULL;
sd->output_queue_tailp = &sd->output_queue;
local_irq_enable();
while (head) {
struct Qdisc *q = head;
spinlock_t *root_lock;
head = head->next_sched;
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
if (!test_bit(__QDISC_STATE_DEACTIVATED,
&q->state)) {
__netif_reschedule(q);
} else {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
}
}
}
}
}
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
(defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
* when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
* instructions (a compare and 2 extra stores) when it is off
* but CONFIG_NET_CLS_ACT is on.
*
* NOTE: This doesn't stop any functionality; if you don't have
* the ingress scheduler, you just can't add policies on ingress.
*
*/
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
struct net_device *dev = skb->dev;
u32 ttl = G_TC_RTTL(skb->tc_verd);
int result = TC_ACT_OK;
struct Qdisc *q;
if (unlikely(MAX_RED_LOOP < ttl++)) {
if (net_ratelimit())
pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
skb->skb_iif, dev->ifindex);
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
return result;
}
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
if (!rxq || rxq->qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
switch (ing_filter(skb, rxq)) {
case TC_ACT_SHOT:
case TC_ACT_STOLEN:
kfree_skb(skb);
return NULL;
}
out:
skb->tc_verd = 0;
return skb;
}
#endif
/**
* netdev_rx_handler_register - register receive handler
* @dev: device to register a handler for
* @rx_handler: receive handler to register
* @rx_handler_data: data pointer that is used by rx handler
*
* Register a receive handler for a device. This handler will then be
* called from __netif_receive_skb. A negative errno code is returned
* on a failure.
*
* The caller must hold the rtnl_mutex.
*
* For a general description of rx_handler, see enum rx_handler_result.
*/
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data)
{
ASSERT_RTNL();
if (dev->rx_handler)
return -EBUSY;
rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
rcu_assign_pointer(dev->rx_handler, rx_handler);
return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
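/* Usage sketch (illustrative only; my_handle_frame() and my_priv are
 * hypothetical). An upper device such as a bridge or bonding master
 * claims a port's traffic under RTNL:
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		... consume or pass on *pskb ...
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, my_priv);
 *	rtnl_unlock();
 */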
/**
* netdev_rx_handler_unregister - unregister receive handler
* @dev: device to unregister a handler from
*
* Unregister a receive handler from a device.
*
* The caller must hold the rtnl_mutex.
*/
void netdev_rx_handler_unregister(struct net_device *dev)
{
ASSERT_RTNL();
RCU_INIT_POINTER(dev->rx_handler, NULL);
RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
struct net_device *null_or_dev;
bool deliver_exact = false;
2005-04-16 15:20:36 -07:00
int ret = NET_RX_DROP;
__be16 type;
net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
return NET_RX_DROP;
if (!skb->skb_iif)
skb->skb_iif = skb->dev->ifindex;
orig_dev = skb->dev;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb_reset_mac_len(skb);
pt_prev = NULL;
rcu_read_lock();
another_round:
__this_cpu_inc(softnet_data.processed);
if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
skb = vlan_untag(skb);
if (unlikely(!skb))
goto out;
}
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
goto ncls;
}
#endif
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (!ptype->dev || ptype->dev == skb->dev) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
}
#ifdef CONFIG_NET_CLS_ACT
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
ncls:
#endif
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (vlan_tx_tag_present(skb)) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
if (vlan_do_receive(&skb, !rx_handler))
goto another_round;
else if (unlikely(!skb))
goto out;
}
if (rx_handler) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
goto out;
case RX_HANDLER_ANOTHER:
goto another_round;
case RX_HANDLER_EXACT:
deliver_exact = true;
case RX_HANDLER_PASS:
break;
default:
BUG();
}
}
/* deliver only exact match when indicated */
null_or_dev = deliver_exact ? skb->dev : NULL;
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
if (ptype->type == type &&
(ptype->dev == null_or_dev || ptype->dev == skb->dev ||
ptype->dev == orig_dev)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
}
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
/* Jamal, now you will not be able to escape explaining
* to me how you were going to use this. :-)
*/
ret = NET_RX_DROP;
}
out:
rcu_read_unlock();
return ret;
}
/**
* netif_receive_skb - process receive buffer from network
* @skb: buffer to process
*
* netif_receive_skb() is the main receive data processing function.
* It always succeeds. The buffer may be dropped during processing
* for congestion control or by the protocol layers.
*
* This function may only be called from softirq context and interrupts
* should be enabled.
*
* Return values (usually ignored):
* NET_RX_SUCCESS: no congestion
* NET_RX_DROP: packet was dropped
*/
int netif_receive_skb(struct sk_buff *skb)
{
net_timestamp_check(netdev_tstamp_prequeue, skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
#ifdef CONFIG_RPS
if (static_branch(&rps_needed)) {
2010-05-15 23:57:10 -07:00
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu, ret;
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
if (cpu >= 0) {
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
}
#endif
return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
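/* Usage sketch (illustrative only). A NAPI driver delivers each frame
 * from its ->poll() callback in softirq context:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 *
 * Drivers that want GRO call napi_gro_receive() further below instead.
 */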
/* Network device is going away, flush any packets still pending
* Called with irqs disabled.
*/
static void flush_backlog(void *arg)
{
struct net_device *dev = arg;
struct softnet_data *sd = &__get_cpu_var(softnet_data);
struct sk_buff *skb, *tmp;
rps_lock(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
kfree_skb(skb);
input_queue_head_incr(sd);
}
}
rps_unlock(sd);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
kfree_skb(skb);
input_queue_head_incr(sd);
}
}
}
static int napi_gro_complete(struct sk_buff *skb)
{
struct packet_type *ptype;
__be16 type = skb->protocol;
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int err = -ENOENT;
if (NAPI_GRO_CB(skb)->count == 1) {
skb_shinfo(skb)->gso_size = 0;
goto out;
}
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
if (ptype->type != type || ptype->dev || !ptype->gro_complete)
continue;
err = ptype->gro_complete(skb);
break;
}
rcu_read_unlock();
if (err) {
WARN_ON(&ptype->list == head);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
out:
return netif_receive_skb(skb);
}
inline void napi_gro_flush(struct napi_struct *napi)
{
struct sk_buff *skb, *next;
for (skb = napi->gro_list; skb; skb = next) {
next = skb->next;
skb->next = NULL;
napi_gro_complete(skb);
}
napi->gro_count = 0;
napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct packet_type *ptype;
__be16 type = skb->protocol;
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int same_flow;
int mac_len;
enum gro_result ret;
if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
goto normal;
if (skb_is_gso(skb) || skb_has_frag_list(skb))
goto normal;
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
if (ptype->type != type || ptype->dev || !ptype->gro_receive)
continue;
skb_set_network_header(skb, skb_gro_offset(skb));
mac_len = skb->network_header - skb->mac_header;
skb->mac_len = mac_len;
NAPI_GRO_CB(skb)->same_flow = 0;
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
pp = ptype->gro_receive(&napi->gro_list, skb);
break;
}
rcu_read_unlock();
if (&ptype->list == head)
goto normal;
same_flow = NAPI_GRO_CB(skb)->same_flow;
ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
if (pp) {
struct sk_buff *nskb = *pp;
*pp = nskb->next;
nskb->next = NULL;
napi_gro_complete(nskb);
napi->gro_count--;
}
if (same_flow)
goto ok;
if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
goto normal;
napi->gro_count++;
NAPI_GRO_CB(skb)->count = 1;
skb_shinfo(skb)->gso_size = skb_gro_len(skb);
skb->next = napi->gro_list;
napi->gro_list = skb;
ret = GRO_HELD;
pull:
if (skb_headlen(skb) < skb_gro_offset(skb)) {
int grow = skb_gro_offset(skb) - skb_headlen(skb);
BUG_ON(skb->end - skb->tail < grow);
memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
skb->tail += grow;
skb->data_len -= grow;
skb_shinfo(skb)->frags[0].page_offset += grow;
skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
skb_frag_unref(skb, 0);
memmove(skb_shinfo(skb)->frags,
skb_shinfo(skb)->frags + 1,
--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
}
}
ok:
return ret;
normal:
ret = GRO_NORMAL;
goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);
static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
for (p = napi->gro_list; p; p = p->next) {
unsigned long diffs;
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
diffs |= p->vlan_tci ^ skb->vlan_tci;
diffs |= compare_ether_header(skb_mac_header(p),
skb_gro_mac_header(skb));
NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
return dev_gro_receive(napi, skb);
}
gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
case GRO_NORMAL:
if (netif_receive_skb(skb))
ret = GRO_DROP;
break;
case GRO_DROP:
case GRO_MERGED_FREE:
kfree_skb(skb);
break;
case GRO_HELD:
case GRO_MERGED:
break;
}
return ret;
}
EXPORT_SYMBOL(napi_skb_finish);
void skb_gro_reset_offset(struct sk_buff *skb)
{
NAPI_GRO_CB(skb)->data_offset = 0;
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
if (skb->mac_header == skb->tail &&
!PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
NAPI_GRO_CB(skb)->frag0 =
skb_frag_address(&skb_shinfo(skb)->frags[0]);
NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
}
}
EXPORT_SYMBOL(skb_gro_reset_offset);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
skb_gro_reset_offset(skb);
return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
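/* Usage sketch (illustrative only; my_poll() and my_next_rx_skb() are
 * hypothetical). GRO-aware drivers feed frames through their napi context
 * so same-flow packets can be merged before they hit the stack:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = my_next_rx_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */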
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
__skb_pull(skb, skb_headlen(skb));
skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
napi->skb = skb;
}
struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
if (!skb) {
skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
if (skb)
napi->skb = skb;
}
return skb;
}
EXPORT_SYMBOL(napi_get_frags);
gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
gro_result_t ret)
{
switch (ret) {
case GRO_NORMAL:
case GRO_HELD:
skb->protocol = eth_type_trans(skb, skb->dev);
if (ret == GRO_HELD)
skb_gro_pull(skb, -ETH_HLEN);
else if (netif_receive_skb(skb))
ret = GRO_DROP;
break;
case GRO_DROP:
case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;
case GRO_MERGED:
break;
}
return ret;
}
EXPORT_SYMBOL(napi_frags_finish);
struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
struct ethhdr *eth;
unsigned int hlen;
unsigned int off;
napi->skb = NULL;
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb);
off = skb_gro_offset(skb);
hlen = off + sizeof(*eth);
eth = skb_gro_header_fast(skb, off);
if (skb_gro_header_hard(skb, hlen)) {
eth = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!eth)) {
napi_reuse_skb(napi, skb);
skb = NULL;
goto out;
}
}
skb_gro_pull(skb, sizeof(*eth));
/*
* This works because the only protocols we care about don't require
* special handling. We'll fix it up properly at the end.
*/
skb->protocol = eth->h_proto;
out:
return skb;
}
EXPORT_SYMBOL(napi_frags_skb);
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
return GRO_DROP;
return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
/*
* net_rps_action sends any pending IPI's for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
*/
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
struct softnet_data *remsd = sd->rps_ipi_list;
if (remsd) {
sd->rps_ipi_list = NULL;
local_irq_enable();
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
if (cpu_online(remsd->cpu))
__smp_call_function_single(remsd->cpu,
&remsd->csd, 0);
remsd = next;
}
} else
#endif
local_irq_enable();
}
static int process_backlog(struct napi_struct *napi, int quota)
{
int work = 0;
struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
#ifdef CONFIG_RPS
/* Check if we have pending IPIs; it's better to send them now
* rather than waiting for net_rx_action() to end.
*/
if (sd->rps_ipi_list) {
local_irq_disable();
net_rps_action_and_irq_enable(sd);
}
#endif
napi->weight = weight_p;
local_irq_disable();
while (work < quota) {
2005-04-16 15:20:36 -07:00
struct sk_buff *skb;
unsigned int qlen;
while ((skb = __skb_dequeue(&sd->process_queue))) {
local_irq_enable();
__netif_receive_skb(skb);
local_irq_disable();
input_queue_head_incr(sd);
if (++work >= quota) {
local_irq_enable();
return work;
}
}
rps_lock(sd);
qlen = skb_queue_len(&sd->input_pkt_queue);
if (qlen)
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
if (qlen < quota - work) {
/*
* Inline a custom version of __napi_complete().
* Only the current cpu owns and manipulates this napi,
* and NAPI_STATE_SCHED is the only possible flag set on backlog.
* We can use a plain write instead of clear_bit(),
* and we don't need an smp_mb() memory barrier.
*/
list_del(&napi->poll_list);
napi->state = 0;
quota = work + qlen;
}
rps_unlock(sd);
}
local_irq_enable();
return work;
}
/**
* __napi_schedule - schedule for receive
* @n: entry to schedule
*
* The entry's receive function will be scheduled to run
*/
void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
void __napi_complete(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
BUG_ON(n->gro_list);
list_del(&n->poll_list);
smp_mb__before_clear_bit();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
void napi_complete(struct napi_struct *n)
{
unsigned long flags;
/*
* don't let napi dequeue from the cpu poll list
* just in case it's running on a different cpu
*/
if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
return;
napi_gro_flush(n);
local_irq_save(flags);
__napi_complete(n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
INIT_LIST_HEAD(&napi->poll_list);
napi->gro_count = 0;
napi->gro_list = NULL;
napi->skb = NULL;
napi->poll = poll;
napi->weight = weight;
list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
spin_lock_init(&napi->poll_lock);
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
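/* Usage sketch (illustrative only; priv and my_poll() are hypothetical).
 * A driver registers its NAPI context at probe time, typically with the
 * conventional weight of 64, and later enables it from its ndo_open:
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 *	...
 *	napi_enable(&priv->napi);
 */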
void netif_napi_del(struct napi_struct *napi)
{
struct sk_buff *skb, *next;
list_del_init(&napi->dev_list);
napi_free_frags(napi);
for (skb = napi->gro_list; skb; skb = next) {
next = skb->next;
skb->next = NULL;
kfree_skb(skb);
}
napi->gro_list = NULL;
napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
local_irq_disable();
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
/* If softirq window is exhausted then punt.
* Allow this to run for 2 jiffies, which allows
* an average latency of 1.5/HZ.
*/
if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
goto softnet_break;
local_irq_enable();
/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
* entries to the tail of this list, and only ->poll()
* calls can remove this head entry from the list.
*/
n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
2005-04-16 15:20:36 -07:00
have = netpoll_poll_lock(n);
weight = n->weight;
/* This NAPI_STATE_SCHED test is for avoiding a race
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
* accidentally calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
work = n->poll(n, weight);
trace_napi_poll(n);
}
WARN_ON_ONCE(work > weight);
budget -= work;
local_irq_disable();
/* Drivers must not modify the NAPI state if they
* consume the entire weight. In such cases this code
* still "owns" the NAPI instance and therefore can
* move the instance around on the list at-will.
*/
if (unlikely(work == weight)) {
if (unlikely(napi_disable_pending(n))) {
local_irq_enable();
napi_complete(n);
local_irq_disable();
} else
list_move_tail(&n->poll_list, &sd->poll_list);
}
netpoll_poll_unlock(have);
}
out:
net_rps_action_and_irq_enable(sd);
#ifdef CONFIG_NET_DMA
/*
* There may not be any more sk_buffs coming right now, so push
* any pending DMA copies to hardware
*/
dma_issue_pending_all();
#endif
2005-04-16 15:20:36 -07:00
return;
softnet_break:
sd->time_squeeze++;
2005-04-16 15:20:36 -07:00
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
static gifconf_func_t *gifconf_list[NPROTO];
/**
* register_gifconf - register a SIOCGIF handler
* @family: Address family
* @gifconf: Function handler
*
* Register protocol dependent address dumping routines. The handler
* that is passed must not be freed or reused until it has been replaced
* by another handler.
*/
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
if (family >= NPROTO)
return -EINVAL;
gifconf_list[family] = gifconf;
return 0;
}
EXPORT_SYMBOL(register_gifconf);
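/*
 * Usage sketch (illustrative, hypothetical names; handler signature per
 * gifconf_func_t): an address family registers one handler at init time,
 * much as IPv4 does for PF_INET. A NULL buffer means "only report how
 * much space would be needed".
 */
#if 0
static int my_gifconf(struct net_device *dev, char __user *buf, int len)
{
        /* write one struct ifreq per address of @dev into @buf and
         * return the number of bytes consumed (or needed, if !buf) */
        return 0;
}

static int __init my_family_init(void)
{
        return register_gifconf(PF_INET, my_gifconf);
}
#endif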
/*
* Map an interface index to its name (SIOCGIFNAME)
*/
/*
* We need this ioctl for efficient implementation of the
* if_indextoname() function required by the IPv6 API. Without
* it, we would have to search all the interfaces to find a
* match. --pb
*/
static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
struct net_device *dev;
struct ifreq ifr;
/*
* Fetch the caller's info block.
*/
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
        if (!dev) {
                rcu_read_unlock();
return -ENODEV;
}
strcpy(ifr.ifr_name, dev->name);
        rcu_read_unlock();
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
return -EFAULT;
return 0;
}
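/*
 * Userspace view (illustrative, hypothetical helper; error handling
 * trimmed): if_indextoname() reduces to this one ioctl on any socket.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int index_to_name(int sock, int ifindex, char name[IFNAMSIZ])
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_ifindex = ifindex;
        if (ioctl(sock, SIOCGIFNAME, &ifr) < 0)
                return -1;
        memcpy(name, ifr.ifr_name, IFNAMSIZ);
        return 0;
}
#endif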
/*
* Perform a SIOCGIFCONF call. This structure will change
* size eventually, and there is nothing I can do about it.
* Thus we will need a 'compatibility mode'.
*/
static int dev_ifconf(struct net *net, char __user *arg)
{
struct ifconf ifc;
struct net_device *dev;
char __user *pos;
int len;
int total;
int i;
/*
* Fetch the caller's info block.
*/
if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
return -EFAULT;
pos = ifc.ifc_buf;
len = ifc.ifc_len;
/*
* Loop over the interfaces, and write an info block for each.
*/
total = 0;
for_each_netdev(net, dev) {
for (i = 0; i < NPROTO; i++) {
if (gifconf_list[i]) {
int done;
if (!pos)
done = gifconf_list[i](dev, NULL, 0);
else
done = gifconf_list[i](dev, pos + total,
len - total);
if (done < 0)
return -EFAULT;
total += done;
}
}
        }
/*
* All done. Write the updated control block back to the caller.
*/
ifc.ifc_len = total;
/*
* Both BSD and Solaris return 0 here, so we do too.
*/
return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
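/*
 * Userspace view (illustrative, hypothetical helper; no error handling):
 * the classic two-pass SIOCGIFCONF dance. With ifc_buf == NULL the first
 * call only computes the length needed.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>
#include <net/if.h>

static struct ifconf my_get_ifconf(int sock)
{
        struct ifconf ifc;

        ifc.ifc_len = 0;
        ifc.ifc_buf = NULL;
        ioctl(sock, SIOCGIFCONF, &ifc);         /* sizing pass */
        ifc.ifc_buf = malloc(ifc.ifc_len);
        ioctl(sock, SIOCGIFCONF, &ifc);         /* fills the buffer */
        return ifc;
}
#endif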
#ifdef CONFIG_PROC_FS
#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
struct dev_iter_state {
struct seq_net_private p;
unsigned int pos; /* bucket << BUCKET_SPACE + offset */
};
#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
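/*
 * Worked example (added for clarity, assuming NETDEV_HASHBITS == 8 as
 * defined earlier in this file): BUCKET_SPACE is then 32 - 8 = 24, so a
 * pos of (3 << 24) | 5 encodes "bucket 3, offset 5", i.e. the sixth
 * device in name-hash bucket 3.
 */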
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
{
struct dev_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
struct net_device *dev;
struct hlist_node *p;
struct hlist_head *h;
unsigned int count, bucket, offset;
bucket = get_bucket(state->pos);
offset = get_offset(state->pos);
h = &net->dev_name_head[bucket];
count = 0;
hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
if (count++ == offset) {
state->pos = set_bucket_offset(bucket, count);
return dev;
}
}
return NULL;
}
static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
{
struct dev_iter_state *state = seq->private;
struct net_device *dev;
unsigned int bucket;
bucket = get_bucket(state->pos);
do {
dev = dev_from_same_bucket(seq);
if (dev)
return dev;
bucket++;
state->pos = set_bucket_offset(bucket, 0);
} while (bucket < NETDEV_HASHENTRIES);
return NULL;
}
/*
* This is invoked by the /proc filesystem handler to display a device
* in detail.
*/
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
        struct dev_iter_state *state = seq->private;

        rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
/* check for end of the hash */
if (state->pos == 0 && *pos > 1)
return NULL;
        return dev_from_new_bucket(seq);
}
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net_device *dev;

        ++*pos;
if (v == SEQ_START_TOKEN)
return dev_from_new_bucket(seq);
dev = dev_from_same_bucket(seq);
if (dev)
return dev;
return dev_from_new_bucket(seq);
}
void dev_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
"%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
stats->rx_fifo_errors,
stats->rx_length_errors + stats->rx_over_errors +
stats->rx_crc_errors + stats->rx_frame_errors,
stats->rx_compressed, stats->multicast,
stats->tx_bytes, stats->tx_packets,
stats->tx_errors, stats->tx_dropped,
stats->tx_fifo_errors, stats->collisions,
stats->tx_carrier_errors +
stats->tx_aborted_errors +
stats->tx_window_errors +
stats->tx_heartbeat_errors,
stats->tx_compressed);
}
/*
* Called from the PROCfs module. This now uses the new arbitrary sized
* /proc/net interface to create /proc/net/dev
*/
static int dev_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Inter-| Receive "
" | Transmit\n"
" face |bytes packets errs drop fifo frame "
"compressed multicast|bytes packets errs "
"drop fifo colls carrier compressed\n");
else
dev_seq_printf_stats(seq, v);
return 0;
}
static struct softnet_data *softnet_get_online(loff_t *pos)
{
        struct softnet_data *sd = NULL;

        while (*pos < nr_cpu_ids)
                if (cpu_online(*pos)) {
                        sd = &per_cpu(softnet_data, *pos);
                        break;
                } else
                        ++*pos;
        return sd;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
return softnet_get_online(pos);
}
static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return softnet_get_online(pos);
}
static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}
static int softnet_seq_show(struct seq_file *seq, void *v)
{
        struct softnet_data *sd = v;

        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
                   sd->processed, sd->dropped, sd->time_squeeze, 0,
                   0, 0, 0, 0, /* was fastroute */
                   sd->cpu_collision, sd->received_rps);
        return 0;
}

static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = dev_seq_show,
};
static int dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &dev_seq_ops,
                            sizeof(struct dev_iter_state));
}
int dev_seq_open_ops(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
}
static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
.start = softnet_seq_start,
.next = softnet_seq_next,
.stop = softnet_seq_stop,
.show = softnet_seq_show,
};
static int softnet_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &softnet_seq_ops);
}
static const struct file_operations softnet_seq_fops = {
.owner = THIS_MODULE,
.open = softnet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *ptype_get_idx(loff_t pos)
{
struct packet_type *pt = NULL;
loff_t i = 0;
int t;
list_for_each_entry_rcu(pt, &ptype_all, list) {
if (i == pos)
return pt;
++i;
}
for (t = 0; t < PTYPE_HASH_SIZE; t++) {
list_for_each_entry_rcu(pt, &ptype_base[t], list) {
if (i == pos)
return pt;
++i;
}
}
return NULL;
}
static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct packet_type *pt;
struct list_head *nxt;
int hash;
++*pos;
if (v == SEQ_START_TOKEN)
return ptype_get_idx(0);
pt = v;
nxt = pt->list.next;
if (pt->type == htons(ETH_P_ALL)) {
if (nxt != &ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
} else
hash = ntohs(pt->type) & PTYPE_HASH_MASK;
        while (nxt == &ptype_base[hash]) {
                if (++hash >= PTYPE_HASH_SIZE)
return NULL;
nxt = ptype_base[hash].next;
}
found:
return list_entry(nxt, struct packet_type, list);
}
static void ptype_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ptype_seq_show(struct seq_file *seq, void *v)
{
struct packet_type *pt = v;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Type Device Function\n");
else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else
seq_printf(seq, "%04x", ntohs(pt->type));
                seq_printf(seq, " %-8s %pF\n",
                           pt->dev ? pt->dev->name : "", pt->func);
}
return 0;
}
static const struct seq_operations ptype_seq_ops = {
.start = ptype_seq_start,
.next = ptype_seq_next,
.stop = ptype_seq_stop,
.show = ptype_seq_show,
};
static int ptype_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ptype_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations ptype_seq_fops = {
.owner = THIS_MODULE,
.open = ptype_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init dev_proc_net_init(struct net *net)
{
int rc = -ENOMEM;
if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
goto out;
if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
goto out_dev;
if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
                goto out_softnet;

        if (wext_proc_init(net))
                goto out_ptype;
rc = 0;
out:
return rc;
out_ptype:
        proc_net_remove(net, "ptype");
out_softnet:
        proc_net_remove(net, "softnet_stat");
out_dev:
        proc_net_remove(net, "dev");
goto out;
}
static void __net_exit dev_proc_net_exit(struct net *net)
{
wext_proc_exit(net);
proc_net_remove(net, "ptype");
proc_net_remove(net, "softnet_stat");
proc_net_remove(net, "dev");
}
static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init,
.exit = dev_proc_net_exit,
};
static int __init dev_proc_init(void)
{
return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif /* CONFIG_PROC_FS */
/**
 * netdev_set_master - set up master pointer
* @slave: slave device
* @master: new master device
*
* Changes the master device of the slave. Pass %NULL to break the
* bonding. The caller must hold the RTNL semaphore. On a failure
* a negative errno code is returned. On success the reference counts
 * are adjusted and the function returns zero.
*/
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
struct net_device *old = slave->master;
ASSERT_RTNL();
if (master) {
if (old)
return -EBUSY;
dev_hold(master);
}
slave->master = master;
        if (old)
                dev_put(old);
return 0;
}
EXPORT_SYMBOL(netdev_set_master);
/**
* netdev_set_bond_master - set up bonding master/slave pair
* @slave: slave device
* @master: new master device
*
* Changes the master device of the slave. Pass %NULL to break the
* bonding. The caller must hold the RTNL semaphore. On a failure
* a negative errno code is returned. On success %RTM_NEWLINK is sent
* to the routing socket and the function returns zero.
*/
int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
{
int err;
ASSERT_RTNL();
err = netdev_set_master(slave, master);
if (err)
return err;
if (master)
slave->flags |= IFF_SLAVE;
else
slave->flags &= ~IFF_SLAVE;
rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
return 0;
}
EXPORT_SYMBOL(netdev_set_bond_master);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
ops->ndo_change_rx_flags(dev, flags);
}
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
unsigned int old_flags = dev->flags;
uid_t uid;
gid_t gid;
ASSERT_RTNL();
dev->flags |= IFF_PROMISC;
dev->promiscuity += inc;
if (dev->promiscuity == 0) {
/*
* Avoid overflow.
* If inc causes overflow, untouch promisc and return error.
*/
if (inc < 0)
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
printk(KERN_WARNING "%s: promiscuity touches roof, "
"set promiscuity failed, promiscuity feature "
"of device might be broken.\n", dev->name);
return -EOVERFLOW;
}
}
if (dev->flags != old_flags) {
printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
"left");
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_ANOM_PROMISCUOUS,
"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
dev->name, (dev->flags & IFF_PROMISC),
(old_flags & IFF_PROMISC),
audit_get_loginuid(current),
uid, gid,
audit_get_sessionid(current));
}
dev_change_rx_flags(dev, IFF_PROMISC);
}
return 0;
}
/**
* dev_set_promiscuity - update promiscuity count on a device
* @dev: device
* @inc: modifier
*
 * Add or remove promiscuity from a device. While the count in the device
* remains above zero the interface remains promiscuous. Once it hits zero
* the device reverts back to normal filtering operation. A negative inc
* value is used to drop promiscuity on the device.
* Return 0 if successful or a negative errno code on error.
*/
int dev_set_promiscuity(struct net_device *dev, int inc)
{
        unsigned int old_flags = dev->flags;
        int err;

        err = __dev_set_promiscuity(dev, inc);
if (err < 0)
return err;
if (dev->flags != old_flags)
dev_set_rx_mode(dev);
return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
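/*
 * Usage sketch (illustrative, hypothetical names): a capture facility
 * takes one promiscuity reference per open capture and drops it on close;
 * the device leaves promiscuous mode only when the count hits zero.
 */
#if 0
static int my_capture_open(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_promiscuity(dev, 1);
        rtnl_unlock();
        return err;
}

static void my_capture_close(struct net_device *dev)
{
        rtnl_lock();
        dev_set_promiscuity(dev, -1);
        rtnl_unlock();
}
#endif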
/**
* dev_set_allmulti - update allmulti count on a device
* @dev: device
* @inc: modifier
*
* Add or remove reception of all multicast frames to a device. While the
* count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to normal
* filtering operation. A negative @inc value is used to drop the counter
* when releasing a resource needing all multicasts.
* Return 0 if successful or a negative errno code on error.
*/
int dev_set_allmulti(struct net_device *dev, int inc)
{
        unsigned int old_flags = dev->flags;

        ASSERT_RTNL();
dev->flags |= IFF_ALLMULTI;
dev->allmulti += inc;
if (dev->allmulti == 0) {
/*
* Avoid overflow.
* If inc causes overflow, untouch allmulti and return error.
*/
if (inc < 0)
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
printk(KERN_WARNING "%s: allmulti touches roof, "
"set allmulti failed, allmulti feature of "
"device might be broken.\n", dev->name);
return -EOVERFLOW;
}
}
if (dev->flags ^ old_flags) {
dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
}
return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
* Upload unicast and multicast address lists to device and
* configure RX filtering. When the device doesn't support unicast
* filtering it is put in promiscuous mode while unicast addresses
* are present.
*/
void __dev_set_rx_mode(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
/* dev_open will call this function so the list will stay sane. */
if (!(dev->flags&IFF_UP))
return;
if (!netif_device_present(dev))
                return;
if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
/* Unicast addresses changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
__dev_set_promiscuity(dev, 1);
dev->uc_promisc = true;
} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
__dev_set_promiscuity(dev, -1);
dev->uc_promisc = false;
}
}
if (ops->ndo_set_rx_mode)
ops->ndo_set_rx_mode(dev);
}
void dev_set_rx_mode(struct net_device *dev)
{
netif_addr_lock_bh(dev);
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
}
/**
* dev_get_flags - get flags reported to userspace
* @dev: device
*
* Get the combination of flag bits exported through APIs to userspace.
*/
unsigned dev_get_flags(const struct net_device *dev)
{
unsigned flags;
flags = (dev->flags & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
(dev->gflags & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
if (netif_oper_up(dev))
flags |= IFF_RUNNING;
if (netif_carrier_ok(dev))
flags |= IFF_LOWER_UP;
if (netif_dormant(dev))
flags |= IFF_DORMANT;
}
return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
unsigned int old_flags = dev->flags;
int ret;
        ASSERT_RTNL();
/*
* Set the flags on our device.
*/
dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
IFF_AUTOMEDIA)) |
(dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
IFF_ALLMULTI));
/*
* Load in the correct multicast list now the flags have changed.
*/
if ((old_flags ^ flags) & IFF_MULTICAST)
dev_change_rx_flags(dev, IFF_MULTICAST);
dev_set_rx_mode(dev);
/*
         * Have we downed the interface? We handle IFF_UP ourselves
* according to user attempts to set it, rather than blindly
* setting it.
*/
ret = 0;
if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
if (!ret)
dev_set_rx_mode(dev);
}
if ((flags ^ dev->gflags) & IFF_PROMISC) {
                int inc = (flags & IFF_PROMISC) ? 1 : -1;
dev->gflags ^= IFF_PROMISC;
dev_set_promiscuity(dev, inc);
}
/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
is important. Some (broken) drivers set IFF_PROMISC, when
IFF_ALLMULTI is requested not asking us and not reporting.
*/
if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
dev->gflags ^= IFF_ALLMULTI;
dev_set_allmulti(dev, inc);
}
return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
unsigned int changes = dev->flags ^ old_flags;
if (changes & IFF_UP) {
if (dev->flags & IFF_UP)
call_netdevice_notifiers(NETDEV_UP, dev);
else
call_netdevice_notifiers(NETDEV_DOWN, dev);
}
if (dev->flags & IFF_UP &&
(changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
call_netdevice_notifiers(NETDEV_CHANGE, dev);
}
/**
* dev_change_flags - change device settings
* @dev: device
* @flags: device state flags
*
* Change settings on device based state flags. The flags are
* in the userspace exported format.
*/
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
int ret;
unsigned int changes, old_flags = dev->flags;
ret = __dev_change_flags(dev, flags);
if (ret < 0)
return ret;
changes = old_flags ^ dev->flags;
if (changes)
rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
__dev_notify_flags(dev, old_flags);
return ret;
}
EXPORT_SYMBOL(dev_change_flags);
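/*
 * Usage sketch (illustrative, hypothetical name): bringing an interface
 * up from kernel code amounts to setting IFF_UP under RTNL; this is what
 * SIOCSIFFLAGS does on behalf of "ifconfig eth0 up".
 */
#if 0
static int my_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_change_flags(dev, dev->flags | IFF_UP);
        rtnl_unlock();
        return err;
}
#endif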
/**
* dev_set_mtu - Change maximum transfer unit
* @dev: device
* @new_mtu: new transfer unit
*
* Change the maximum transfer size of the network device.
*/
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
if (new_mtu == dev->mtu)
return 0;
/* MTU must be positive. */
if (new_mtu < 0)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
err = 0;
if (ops->ndo_change_mtu)
err = ops->ndo_change_mtu(dev, new_mtu);
else
dev->mtu = new_mtu;
        if (!err && dev->flags & IFF_UP)
                call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
return err;
}
EXPORT_SYMBOL(dev_set_mtu);
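/*
 * Usage sketch (illustrative, hypothetical name): a tunnel driver
 * shrinking the MTU of its lower device to make room for encapsulation
 * overhead would do so under RTNL.
 */
#if 0
static int my_reserve_headroom(struct net_device *lower, int overhead)
{
        int err;

        rtnl_lock();
        err = dev_set_mtu(lower, lower->mtu - overhead);
        rtnl_unlock();
        return err;
}
#endif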
/**
* dev_set_group - Change group this device belongs to
* @dev: device
* @new_group: group this device should belong to
*/
void dev_set_group(struct net_device *dev, int new_group)
{
dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);
/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
*
* Change the hardware (MAC) address of the device
*/
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
if (!ops->ndo_set_mac_address)
return -EOPNOTSUPP;
if (sa->sa_family != dev->type)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
err = ops->ndo_set_mac_address(dev, sa);
        if (!err)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
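/*
 * Usage sketch (illustrative, hypothetical name): callers build a
 * struct sockaddr whose family matches dev->type (ARPHRD_ETHER for
 * Ethernet) and hold RTNL across the call.
 */
#if 0
static int my_set_mac(struct net_device *dev, const u8 *addr)
{
        struct sockaddr sa;
        int err;

        sa.sa_family = dev->type;
        memcpy(sa.sa_data, addr, dev->addr_len);
        rtnl_lock();
        err = dev_set_mac_address(dev, &sa);
        rtnl_unlock();
        return err;
}
#endif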
/*
 *      Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
int err;
        struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
if (!dev)
return -ENODEV;
switch (cmd) {
case SIOCGIFFLAGS: /* Get interface flags */
ifr->ifr_flags = (short) dev_get_flags(dev);
return 0;
case SIOCGIFMETRIC: /* Get the metric on the interface
(currently unused) */
ifr->ifr_metric = 0;
return 0;
case SIOCGIFMTU: /* Get the MTU of a device */
ifr->ifr_mtu = dev->mtu;
return 0;
case SIOCGIFHWADDR:
if (!dev->addr_len)
memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
else
memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
ifr->ifr_hwaddr.sa_family = dev->type;
return 0;
case SIOCGIFSLAVE:
err = -EINVAL;
break;
case SIOCGIFMAP:
ifr->ifr_map.mem_start = dev->mem_start;
ifr->ifr_map.mem_end = dev->mem_end;
ifr->ifr_map.base_addr = dev->base_addr;
ifr->ifr_map.irq = dev->irq;
ifr->ifr_map.dma = dev->dma;
ifr->ifr_map.port = dev->if_port;
return 0;
case SIOCGIFINDEX:
ifr->ifr_ifindex = dev->ifindex;
return 0;
case SIOCGIFTXQLEN:
ifr->ifr_qlen = dev->tx_queue_len;
return 0;
default:
/* dev_ioctl() should ensure this case
* is never reached
*/
WARN_ON(1);
err = -ENOTTY;
break;
}
return err;
}
/*
* Perform the SIOCxIFxxx calls, inside rtnl_lock()
*/
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
int err;
struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
const struct net_device_ops *ops;
if (!dev)
return -ENODEV;
ops = dev->netdev_ops;
switch (cmd) {
case SIOCSIFFLAGS: /* Set interface flags */
return dev_change_flags(dev, ifr->ifr_flags);
case SIOCSIFMETRIC: /* Set the metric on the interface
(currently unused) */
return -EOPNOTSUPP;
case SIOCSIFMTU: /* Set the MTU of a device */
return dev_set_mtu(dev, ifr->ifr_mtu);
case SIOCSIFHWADDR:
return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
case SIOCSIFHWBROADCAST:
if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return 0;
case SIOCSIFMAP:
if (ops->ndo_set_config) {
if (!netif_device_present(dev))
return -ENODEV;
return ops->ndo_set_config(dev, &ifr->ifr_map);
}
return -EOPNOTSUPP;
case SIOCADDMULTI:
if (!ops->ndo_set_rx_mode ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI:
if (!ops->ndo_set_rx_mode ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCSIFTXQLEN:
if (ifr->ifr_qlen < 0)
return -EINVAL;
dev->tx_queue_len = ifr->ifr_qlen;
return 0;
case SIOCSIFNAME:
ifr->ifr_newname[IFNAMSIZ-1] = '\0';
return dev_change_name(dev, ifr->ifr_newname);
case SIOCSHWTSTAMP:
err = net_hwtstamp_validate(ifr);
if (err)
return err;
/* fall through */
/*
* Unknown or private ioctl
*/
default:
if ((cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15) ||
cmd == SIOCBONDENSLAVE ||
cmd == SIOCBONDRELEASE ||
cmd == SIOCBONDSETHWADDR ||
cmd == SIOCBONDSLAVEINFOQUERY ||
cmd == SIOCBONDINFOQUERY ||
cmd == SIOCBONDCHANGEACTIVE ||
cmd == SIOCGMIIPHY ||
cmd == SIOCGMIIREG ||
cmd == SIOCSMIIREG ||
cmd == SIOCBRADDIF ||
cmd == SIOCBRDELIF ||
cmd == SIOCSHWTSTAMP ||
cmd == SIOCWANDEV) {
err = -EOPNOTSUPP;
if (ops->ndo_do_ioctl) {
if (netif_device_present(dev))
err = ops->ndo_do_ioctl(dev, ifr, cmd);
else
err = -ENODEV;
}
} else
err = -EINVAL;
}
return err;
}
/*
* This function handles all "interface"-type I/O control requests. The actual
* 'doing' part of this is dev_ifsioc above.
*/
/**
* dev_ioctl - network device ioctl
* @net: the applicable net namespace
* @cmd: command to issue
* @arg: pointer to a struct ifreq in user space
*
* Issue ioctl functions to devices. This is normally called by the
* user space syscall interfaces but can sometimes be useful for
* other purposes. The return value is the return from the syscall if
* positive or a negative errno code on error.
*/
int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct ifreq ifr;
int ret;
char *colon;
/* One special case: SIOCGIFCONF takes ifconf argument
and requires shared lock, because it sleeps writing
to user space.
*/
if (cmd == SIOCGIFCONF) {
                rtnl_lock();
                ret = dev_ifconf(net, (char __user *) arg);
                rtnl_unlock();
return ret;
}
if (cmd == SIOCGIFNAME)
return dev_ifname(net, (struct ifreq __user *)arg);
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
ifr.ifr_name[IFNAMSIZ-1] = 0;
colon = strchr(ifr.ifr_name, ':');
if (colon)
*colon = 0;
/*
* See which interface the caller is talking about.
*/
switch (cmd) {
/*
* These ioctl calls:
* - can be done by all.
* - atomic and do not require locking.
* - return a value
*/
case SIOCGIFFLAGS:
case SIOCGIFMETRIC:
case SIOCGIFMTU:
case SIOCGIFHWADDR:
case SIOCGIFSLAVE:
case SIOCGIFMAP:
case SIOCGIFINDEX:
case SIOCGIFTXQLEN:
dev_load(net, ifr.ifr_name);
                rcu_read_lock();
                ret = dev_ifsioc_locked(net, &ifr, cmd);
                rcu_read_unlock();
if (!ret) {
if (colon)
*colon = ':';
if (copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
return ret;
case SIOCETHTOOL:
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ethtool(net, &ifr);
rtnl_unlock();
if (!ret) {
if (colon)
*colon = ':';
if (copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
return ret;
/*
* These ioctl calls:
* - require superuser power.
* - require strict serialization.
* - return a value
*/
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSIFNAME:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, &ifr, cmd);
rtnl_unlock();
if (!ret) {
if (colon)
*colon = ':';
if (copy_to_user(arg, &ifr,
sizeof(struct ifreq)))
ret = -EFAULT;
}
return ret;
/*
* These ioctl calls:
* - require superuser power.
* - require strict serialization.
* - do not return a value
*/
case SIOCSIFFLAGS:
case SIOCSIFMETRIC:
case SIOCSIFMTU:
case SIOCSIFMAP:
case SIOCSIFHWADDR:
case SIOCSIFSLAVE:
case SIOCADDMULTI:
case SIOCDELMULTI:
case SIOCSIFHWBROADCAST:
case SIOCSIFTXQLEN:
case SIOCSMIIREG:
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
case SIOCBRADDIF:
case SIOCBRDELIF:
case SIOCSHWTSTAMP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
/* fall through */
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, &ifr, cmd);
rtnl_unlock();
return ret;
case SIOCGIFMEM:
/* Get the per device memory space. We can add this but
* currently do not support it */
case SIOCSIFMEM:
/* Set the per device memory buffer space.
* Not applicable in our case */
case SIOCSIFLINK:
return -ENOTTY;
/*
* Unknown or private ioctl.
*/
default:
if (cmd == SIOCWANDEV ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
dev_load(net, ifr.ifr_name);
                        rtnl_lock();
                        ret = dev_ifsioc(net, &ifr, cmd);
                        rtnl_unlock();
                        if (!ret && copy_to_user(arg, &ifr,
                                                 sizeof(struct ifreq)))
                                ret = -EFAULT;
return ret;
2009-09-03 01:29:39 -07:00
}
/* Take care of Wireless Extensions */
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
return wext_handle_ioctl(net, &ifr, cmd, arg);
return -ENOTTY;
}
}
/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
* Returns a suitable unique value for a new device interface
* number. The caller must hold the rtnl semaphore or the
* dev_base_lock to be sure it remains unique.
*/
static int dev_new_index(struct net *net)
{
static int ifindex;
for (;;) {
if (++ifindex <= 0)
ifindex = 1;
if (!__dev_get_by_index(net, ifindex))
return ifindex;
}
}
/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
}
static void rollback_registered_many(struct list_head *head)
{
struct net_device *dev, *tmp;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
list_for_each_entry_safe(dev, tmp, head, unreg_list) {
                /* Some devices get here without ever having been
                 * registered, as part of initialization unwind.
                 * Remove those devices and proceed with the remaining.
                 */
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
WARN_ON(1);
list_del(&dev->unreg_list);
continue;
}
dev->dismantle = true;
BUG_ON(dev->reg_state != NETREG_REGISTERED);
}
/* If device is running, close it first. */
dev_close_many(head);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
unlist_netdevice(dev);
dev->reg_state = NETREG_UNREGISTERING;
}
synchronize_net();
list_for_each_entry(dev, head, unreg_list) {
/* Shutdown queueing discipline. */
dev_shutdown(dev);
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/*
* Flush the unicast and multicast chains
*/
dev_uc_flush(dev);
dev_mc_flush(dev);
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
/* Notifier chain MUST detach us from master device. */
WARN_ON(dev->master);
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
}
/* Process any work delayed until the end of the batch */
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
synchronize_net();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
}
static void rollback_registered(struct net_device *dev)
{
LIST_HEAD(single);
list_add(&dev->unreg_list, &single);
rollback_registered_many(&single);
list_del(&single);
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
/* Fix illegal checksum combinations */
if ((features & NETIF_F_HW_CSUM) &&
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
netdev_warn(dev, "mixed HW and IP checksum settings.\n");
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
netdev_dbg(dev,
"Dropping NETIF_F_SG since no checksum feature.\n");
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
features &= ~NETIF_F_ALL_TSO;
}
/* TSO ECN requires that TSO is present as well. */
if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
features &= ~NETIF_F_TSO_ECN;
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
features &= ~NETIF_F_GSO;
}
/* UFO needs SG and checksumming */
if (features & NETIF_F_UFO) {
/* maybe split UFO into V4 and V6? */
if (!((features & NETIF_F_GEN_CSUM) ||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
netdev_dbg(dev,
"Dropping NETIF_F_UFO since no checksum offload features.\n");
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
netdev_dbg(dev,
"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
features &= ~NETIF_F_UFO;
}
}
return features;
}
int __netdev_update_features(struct net_device *dev)
{
netdev_features_t features;
int err = 0;
ASSERT_RTNL();
features = netdev_get_wanted_features(dev);
if (dev->netdev_ops->ndo_fix_features)
features = dev->netdev_ops->ndo_fix_features(dev, features);
/* driver might be less strict about feature dependencies */
features = netdev_fix_features(dev, features);
if (dev->features == features)
return 0;
netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
&dev->features, &features);
if (dev->netdev_ops->ndo_set_features)
err = dev->netdev_ops->ndo_set_features(dev, features);
if (unlikely(err < 0)) {
netdev_err(dev,
"set_features() failed (%d); wanted %pNF, left %pNF\n",
err, &features, &dev->features);
return -1;
}
if (!err)
dev->features = features;
return 1;
}
/**
* netdev_update_features - recalculate device features
* @dev: the device to check
*
* Recalculate dev->features set and send notifications if it
* has changed. Should be called after driver or hardware dependent
* conditions might have changed that influence the features.
*/
void netdev_update_features(struct net_device *dev)
{
if (__netdev_update_features(dev))
netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
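/*
 * Usage sketch (illustrative, hypothetical names): a driver whose
 * offloads depend on the MTU re-evaluates its features from
 * ndo_change_mtu(), which runs under RTNL, so its ndo_fix_features()
 * callback sees the new value.
 */
#if 0
static int my_change_mtu(struct net_device *dev, int new_mtu)
{
        dev->mtu = new_mtu;
        netdev_update_features(dev);
        return 0;
}
#endif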
/**
* netdev_change_features - recalculate device features
* @dev: the device to check
*
* Recalculate dev->features set and send notifications even
* if they have not changed. Should be called instead of
* netdev_update_features() if also dev->vlan_features might
* have changed to allow the changes to be propagated to stacked
* VLAN devices.
*/
void netdev_change_features(struct net_device *dev)
{
__netdev_update_features(dev);
netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
/**
* netif_stacked_transfer_operstate - transfer operstate
* @rootdev: the root or lower level device to transfer state from
* @dev: the device to transfer operstate to
*
* Transfer operational state from root to device. This is normally
* called when a stacking relationship exists between the root
 * device and the device (a leaf device).
*/
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev)
{
if (rootdev->operstate == IF_OPER_DORMANT)
netif_dormant_on(dev);
else
netif_dormant_off(dev);
if (netif_carrier_ok(rootdev)) {
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
} else {
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;

        BUG_ON(count < 1);
rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
if (!rx) {
pr_err("netdev: Unable to allocate %u rx queues.\n", count);
return -ENOMEM;
}
dev->_rx = rx;
for (i = 0; i < count; i++)
rx[i].dev = dev;
return 0;
}
#endif
static void netdev_init_one_queue(struct net_device *dev,
struct netdev_queue *queue, void *_unused)
{
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
queue->xmit_lock_owner = -1;
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
#ifdef CONFIG_BQL
dql_init(&queue->dql, HZ);
#endif
}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
unsigned int count = dev->num_tx_queues;
struct netdev_queue *tx;
BUG_ON(count < 1);
tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
if (!tx) {
pr_err("netdev: Unable to allocate %u tx queues.\n",
count);
return -ENOMEM;
}
dev->_tx = tx;
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
spin_lock_init(&dev->tx_global_lock);
return 0;
}
/**
* register_netdevice - register a network device
* @dev: device to register
*
* Take a completed network device structure and add it to the kernel
* interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
* chain. 0 is returned on success. A negative errno code is returned
* on a failure to set up the device, or if the name is a duplicate.
*
* Callers must hold the rtnl semaphore. You may want
* register_netdev() instead of this.
*
* BUGS:
* The locking appears insufficient to guarantee two parallel registers
* will not get the same name.
*/
int register_netdevice(struct net_device *dev)
{
int ret;
struct net *net = dev_net(dev);
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
might_sleep();
/* When net_device's are persistent, this will be fatal. */
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
BUG_ON(!net);
spin_lock_init(&dev->addr_list_lock);
netdev_set_addr_lockdep_class(dev);
dev->iflink = -1;
ret = dev_get_valid_name(dev, dev->name);
if (ret < 0)
goto out;
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
if (ret) {
if (ret > 0)
ret = -EIO;
goto out;
}
}
        dev->ifindex = dev_new_index(net);
if (dev->iflink == -1)
dev->iflink = dev->ifindex;
/* Transfer changeable features to wanted_features and enable
* software offloads (GSO and GRO).
*/
dev->hw_features |= NETIF_F_SOFT_FEATURES;
dev->features |= NETIF_F_SOFT_FEATURES;
dev->wanted_features = dev->features & dev->hw_features;
/* Turn on no cache copy if HW is doing checksum */
if (!(dev->flags & IFF_LOOPBACK)) {
dev->hw_features |= NETIF_F_NOCACHE_COPY;
if (dev->features & NETIF_F_ALL_CSUM) {
dev->wanted_features |= NETIF_F_NOCACHE_COPY;
dev->features |= NETIF_F_NOCACHE_COPY;
}
}
/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
*/
dev->vlan_features |= NETIF_F_HIGHDMA;
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
goto err_uninit;
ret = netdev_register_kobject(dev);
if (ret)
goto err_uninit;
dev->reg_state = NETREG_REGISTERED;
__netdev_update_features(dev);
/*
         * Default initial state at registration is that the
* device is present.
*/
set_bit(__LINK_STATE_PRESENT, &dev->state);
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
dev->reg_state = NETREG_UNREGISTERED;
}
/*
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
out:
return ret;
err_uninit:
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
* init_dummy_netdev - init a dummy network device for NAPI
* @dev: device to init
*
* This takes a network device structure and initialize the minimum
* amount of fields so it can be used to schedule NAPI polls without
* registering a full blown interface. This is to be used by drivers
* that need to tie several hardware interfaces to a single NAPI
* poll scheduler due to HW limitations.
*/
int init_dummy_netdev(struct net_device *dev)
{
/* Clear everything. Note we don't initialize spinlocks
         * as they aren't supposed to be taken by any of the
* NAPI code and this dummy netdev is supposed to be
* only ever used for NAPI polls
*/
memset(dev, 0, sizeof(struct net_device));
/* make sure we BUG if trying to hit standard
* register/unregister code path
*/
dev->reg_state = NETREG_DUMMY;
/* NAPI wants this */
INIT_LIST_HEAD(&dev->napi_list);
/* a dummy interface is started by default */
set_bit(__LINK_STATE_PRESENT, &dev->state);
set_bit(__LINK_STATE_START, &dev->state);
        /* Note: We don't allocate pcpu_refcnt for dummy devices,
         * because users of this 'device' don't need to change
* its refcount.
*/
return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
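/*
 * Usage sketch (illustrative, hypothetical names): hardware with several
 * MACs behind one interrupt can hang each NAPI context off a dummy
 * netdev instead of registering extra interfaces.
 */
#if 0
struct my_board {
        struct net_device dummy;        /* never registered */
        struct napi_struct napi;
};

static void my_board_napi_init(struct my_board *bd,
                               int (*poll)(struct napi_struct *, int))
{
        init_dummy_netdev(&bd->dummy);
        netif_napi_add(&bd->dummy, &bd->napi, poll, 64);
}
#endif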
/**
* register_netdev - register a network device
* @dev: device to register
*
* Take a completed network device structure and add it to the kernel
* interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
* chain. 0 is returned on success. A negative errno code is returned
* on a failure to set up the device, or if the name is a duplicate.
*
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
* and expands the device name if you passed a format string to
* alloc_netdev.
*/
int register_netdev(struct net_device *dev)
{
int err;
rtnl_lock();
err = register_netdevice(dev);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(register_netdev);
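/*
 * Usage sketch (illustrative; struct my_priv is hypothetical): the
 * canonical probe-time sequence is allocate, fill in, register, with
 * free_netdev() undoing a failed registration.
 */
#if 0
static int my_probe(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct my_priv));
        if (!dev)
                return -ENOMEM;
        /* ... set netdev_ops, MAC address, features ... */
        err = register_netdev(dev);     /* takes rtnl_lock() internally */
        if (err)
                free_netdev(dev);
        return err;
}
#endif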
int netdev_refcnt_read(const struct net_device *dev)
{
int i, refcnt = 0;
for_each_possible_cpu(i)
refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);
/*
* netdev_wait_allrefs - wait until all references are gone.
*
* This is called when unregistering network devices.
*
* Any protocol or device that holds a reference should register
* for netdevice notification, and cleanup and put back the
* reference if they receive an UNREGISTER event.
* We can get stuck here if buggy protocols don't correctly
 * call dev_put.
*/
static void netdev_wait_allrefs(struct net_device *dev)
{
unsigned long rebroadcast_time, warning_time;
        int refcnt;

        linkwatch_forget_dev(dev);

        rebroadcast_time = warning_time = jiffies;
refcnt = netdev_refcnt_read(dev);
while (refcnt != 0) {
                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
                        rtnl_lock();

                        /* Rebroadcast unregister notification */
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
                        /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
                         * should have already handled it the first time */
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
&dev->state)) {
/* We must not have linkwatch events
* pending on unregister. If this
* happens, we simply run the queue
* unscheduled, resulting in a noop
* for this device.
*/
linkwatch_run_queue();
}
__rtnl_unlock();
rebroadcast_time = jiffies;
}
msleep(250);
refcnt = netdev_refcnt_read(dev);
if (time_after(jiffies, warning_time + 10 * HZ)) {
printk(KERN_EMERG "unregister_netdevice: "
"waiting for %s to become free. Usage "
"count = %d\n",
dev->name, refcnt);
warning_time = jiffies;
}
}
}
/* The sequence is:
*
* rtnl_lock();
* ...
* register_netdevice(x1);
* register_netdevice(x2);
* ...
* unregister_netdevice(y1);
* unregister_netdevice(y2);
* ...
* rtnl_unlock();
* free_netdev(y1);
* free_netdev(y2);
*
 * We are invoked by rtnl_unlock().
* This allows us to deal with problems:
* 1) We can delete sysfs objects which invoke hotplug
* without deadlocking with linkwatch via keventd.
* 2) Since we run with the RTNL semaphore not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero.
*
* We must not return until all unregister events added during
* the interval the lock was held have been completed.
*/
void netdev_run_todo(void)
{
struct list_head list;
/* Snapshot list, allow later requests */
list_replace_init(&net_todo_list, &list);
__rtnl_unlock();
/* Wait for rcu callbacks to finish before attempting to drain
* the device list. This usually avoids a 250ms wait.
*/
if (!list_empty(&list))
rcu_barrier();
while (!list_empty(&list)) {
struct net_device *dev
                        = list_first_entry(&list, struct net_device, todo_list);
list_del(&dev->todo_list);
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
printk(KERN_ERR "network todo '%s' but state %d\n",
dev->name, dev->reg_state);
dump_stack();
continue;
}
dev->reg_state = NETREG_UNREGISTERED;
on_each_cpu(flush_backlog, dev, 1);
netdev_wait_allrefs(dev);
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
WARN_ON(rcu_access_pointer(dev->ip_ptr));
WARN_ON(rcu_access_pointer(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
if (dev->destructor)
dev->destructor(dev);
/* Free network device */
kobject_put(&dev->dev.kobj);
}
}
/* Convert net_device_stats to rtnl_link_stats64. They have the same
* fields in the same order, with only the type differing.
*/
static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
size_t i, n = sizeof(*stats64) / sizeof(u64);
const unsigned long *src = (const unsigned long *)netdev_stats;
u64 *dst = (u64 *)stats64;
BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
sizeof(*stats64) / sizeof(u64));
for (i = 0; i < n; i++)
dst[i] = src[i];
#endif
}
/**
* dev_get_stats - get network device statistics
* @dev: device to get statistics from
* @storage: place to store stats
*
* Get network statistics from device. Return @storage.
* The device driver may provide its own method by setting
* dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
* otherwise the internal statistics structure is used.
*/
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
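/*
 * Driver-side sketch (illustrative, hypothetical names): a driver
 * implementing the 64-bit interface fills the caller-provided storage,
 * which dev_get_stats() has already zeroed.
 */
#if 0
static struct rtnl_link_stats64 *
my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
        struct my_priv *priv = netdev_priv(dev);

        storage->rx_packets = priv->rx_pkts;
        storage->tx_packets = priv->tx_pkts;
        /* ... and the remaining counters ... */
        return storage;
}
#endif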
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
struct netdev_queue *queue = dev_ingress_queue(dev);
#ifdef CONFIG_NET_CLS_ACT
if (queue)
return queue;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
return NULL;
netdev_init_one_queue(dev, queue, NULL);
queue->qdisc = &noop_qdisc;
queue->qdisc_sleeping = &noop_qdisc;
rcu_assign_pointer(dev->ingress_queue, queue);
#endif
return queue;
}
/**
 * alloc_netdev_mqs - allocate network device
* @sizeof_priv: size of private data to allocate space for
* @name: device name format string
* @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
*
* Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
*/
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs)
{
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
BUG_ON(strlen(name) >= sizeof(dev->name));
if (txqs < 1) {
pr_err("alloc_netdev: Unable to allocate device "
"with zero queues.\n");
return NULL;
}
#ifdef CONFIG_RPS
if (rxqs < 1) {
pr_err("alloc_netdev: Unable to allocate device "
"with zero RX queues.\n");
return NULL;
}
#endif
alloc_size = sizeof(struct net_device);
if (sizeof_priv) {
/* ensure 32-byte alignment of private area */
alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
alloc_size += sizeof_priv;
}
/* ensure 32-byte alignment of whole construct */
alloc_size += NETDEV_ALIGN - 1;
        p = kzalloc(alloc_size, GFP_KERNEL);
if (!p) {
                printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
return NULL;
}
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
goto free_p;
if (dev_addr_init(dev))
goto free_pcpu;
dev_mc_init(dev);
dev_uc_init(dev);
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_MAX_SIZE;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
goto free_all;
#ifdef CONFIG_RPS
dev->num_rx_queues = rxqs;
dev->real_num_rx_queues = rxqs;
if (netif_alloc_rx_queues(dev))
goto free_all;
#endif
strcpy(dev->name, name);
dev->group = INIT_NETDEV_GROUP;
return dev;
free_all:
free_netdev(dev);
return NULL;
free_pcpu:
free_percpu(dev->pcpu_refcnt);
kfree(dev->_tx);
#ifdef CONFIG_RPS
kfree(dev->_rx);
#endif
free_p:
kfree(p);
return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
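/*
 * Example (hypothetical driver; struct example_priv and the queue counts
 * are assumptions): allocating a multiqueue Ethernet device with eight
 * TX and eight RX queues and a driver-private area.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct example_priv), "eth%d",
 *			       ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */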
/**
* free_netdev - free network device
* @dev: device
*
* This function does the last stage of destroying an allocated device
* interface. The reference to the device object is released.
* If this is the last reference then it will be freed.
*/
void free_netdev(struct net_device *dev)
{
struct napi_struct *p, *n;
release_net(dev_net(dev));
kfree(dev->_tx);
#ifdef CONFIG_RPS
kfree(dev->_rx);
#endif
kfree(rcu_dereference_protected(dev->ingress_queue, 1));
/* Flush device addresses */
dev_addr_flush(dev);
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
free_percpu(dev->pcpu_refcnt);
dev->pcpu_refcnt = NULL;
/* Compatibility with error handling in drivers */
if (dev->reg_state == NETREG_UNINITIALIZED) {
kfree((char *)dev - dev->padded);
return;
}
BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
dev->reg_state = NETREG_RELEASED;
/* will free via device release */
put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
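/*
 * Example (illustrative sketch): the error-handling pattern the
 * NETREG_UNINITIALIZED check above exists for.  A device whose
 * registration failed may be handed straight back to free_netdev();
 * a successfully registered one must be unregistered first.
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);	/* never made it into the tables */
 *		return err;
 *	}
 */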
/**
* synchronize_net - Synchronize with packet receive processing
*
* Wait for packets currently being received to be done.
* Does not block later packets from starting.
*/
void synchronize_net(void)
{
might_sleep();
if (rtnl_is_locked())
synchronize_rcu_expedited();
else
synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
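/*
 * Example (illustrative sketch; my_hook and old_hook_state are
 * hypothetical): the unpublish-then-wait pattern.  Once
 * synchronize_net() returns, no CPU can still be executing receive-path
 * code that saw the old pointer, so its backing state may be freed.
 *
 *	RCU_INIT_POINTER(my_hook, NULL);
 *	synchronize_net();
 *	kfree(old_hook_state);
 */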
/**
* unregister_netdevice_queue - remove device from the kernel
* @dev: device
* @head: list
*
* This function shuts down a device interface and removes it
* from the kernel tables.
* If @head is not NULL, the device is queued to be unregistered later.
*
* Callers must hold the rtnl semaphore. You may want
* unregister_netdev() instead of this.
*/
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
ASSERT_RTNL();
if (head) {
list_move_tail(&dev->unreg_list, head);
} else {
rollback_registered(dev);
/* Finish processing unregister after unlock */
net_set_todo(dev);
}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
* unregister_netdevice_many - unregister many devices
* @head: list of devices
*/
void unregister_netdevice_many(struct list_head *head)
{
struct net_device *dev;
if (!list_empty(head)) {
rollback_registered_many(head);
list_for_each_entry(dev, head, unreg_list)
net_set_todo(dev);
}
}
EXPORT_SYMBOL(unregister_netdevice_many);
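/*
 * Example (illustrative sketch; my_link_ops is a hypothetical
 * rtnl_link_ops instance): tearing down many devices at once.  Queueing
 * each device and committing the whole list once pays the rollback and
 * synchronization cost a single time instead of per device.
 *
 *	struct net_device *dev, *aux;
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	for_each_netdev_safe(net, dev, aux)
 *		if (dev->rtnl_link_ops == &my_link_ops)
 *			unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */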
/**
* unregister_netdev - remove device from the kernel
* @dev: device
*
* This function shuts down a device interface and removes it
* from the kernel tables.
*
* This is just a wrapper for unregister_netdevice that takes
* the rtnl semaphore. In general you want to use this and not
* unregister_netdevice.
*/
void unregister_netdev(struct net_device *dev)
{
rtnl_lock();
unregister_netdevice(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
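/*
 * Example (hypothetical module; example_dev is an assumed module-global
 * device pointer): a typical exit path.  unregister_netdev() takes the
 * rtnl semaphore itself, so the caller must not already hold it.
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_netdev(example_dev);
 *		free_netdev(example_dev);
 *	}
 */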
/**
* dev_change_net_namespace - move device to a different network namespace
* @dev: device
* @net: network namespace
* @pat: if not NULL, name pattern to try if the current device name
* is already taken in the destination network namespace.
*
* This function shuts down a device interface and moves it
* to a new network namespace. On success 0 is returned, on
* a failure a negative errno code is returned.
*
* Callers must hold the rtnl semaphore.
*/
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
int err;
ASSERT_RTNL();
/* Don't allow namespace local devices to be moved. */
err = -EINVAL;
if (dev->features & NETIF_F_NETNS_LOCAL)
goto out;
/* Ensure the device has been registered */
err = -EINVAL;
if (dev->reg_state != NETREG_REGISTERED)
goto out;
/* Get out if there is nothing to do */
err = 0;
if (net_eq(dev_net(dev), net))
goto out;
/* Pick the destination device name, and ensure
* we can use it in the destination network namespace.
*/
err = -EEXIST;
if (__dev_get_by_name(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
if (dev_get_valid_name(dev, pat) < 0)
goto out;
}
/*
* And now a mini version of register_netdevice and unregister_netdevice.
*/
/* If device is running close it first. */
dev_close(dev);
/* And unlink it from device chain */
err = -ENODEV;
unlist_netdevice(dev);
synchronize_net();
/* Shutdown queueing discipline. */
dev_shutdown(dev);
/* Notify protocols that we are about to destroy
this device. They should clean up all their state.
Note that dev->reg_state stays at NETREG_REGISTERED.
This is wanted because this way 8021q and macvlan know
the device is just moving and can keep their slaves up.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/*
* Flush the unicast and multicast chains
*/
dev_uc_flush(dev);
dev_mc_flush(dev);
/* Actually switch the network namespace */
dev_net_set(dev, net);
/* If there is an ifindex conflict assign a new one */
if (__dev_get_by_index(net, dev->ifindex)) {
int iflink = (dev->iflink == dev->ifindex);
dev->ifindex = dev_new_index(net);
if (iflink)
dev->iflink = dev->ifindex;
}
/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
/* Add the device back in the hashes */
list_netdevice(dev);
/* Notify protocols that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
/*
* Prevent userspace races by waiting until the network
* device is fully set up before sending notifications.
*/
rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
synchronize_net();
err = 0;
out:
return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
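/*
 * Example (illustrative sketch; target_net is an assumed struct net the
 * caller holds a reference on): moving a device under the rtnl
 * semaphore, with a fallback name pattern in case dev->name is already
 * taken in the destination namespace.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "moved%d");
 *	rtnl_unlock();
 */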
static int dev_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *ocpu)
{
struct sk_buff **list_skb;
struct sk_buff *skb;
unsigned int cpu, oldcpu = (unsigned long)ocpu;
struct softnet_data *sd, *oldsd;
if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
return NOTIFY_OK;
local_irq_disable();
cpu = smp_processor_id();
sd = &per_cpu(softnet_data, cpu);
oldsd = &per_cpu(softnet_data, oldcpu);
/* Find end of our completion_queue. */
list_skb = &sd->completion_queue;
while (*list_skb)
list_skb = &(*list_skb)->next;
/* Append completion queue from offline CPU. */
*list_skb = oldsd->completion_queue;
oldsd->completion_queue = NULL;
/* Append output queue from offline CPU. */
if (oldsd->output_queue) {
*sd->output_queue_tailp = oldsd->output_queue;
sd->output_queue_tailp = oldsd->output_queue_tailp;
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
/* Append NAPI poll list from offline CPU. */
if (!list_empty(&oldsd->poll_list)) {
list_splice_init(&oldsd->poll_list, &sd->poll_list);
raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
netif_rx(skb);
input_queue_head_incr(oldsd);
}
while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx(skb);
input_queue_head_incr(oldsd);
}
return NOTIFY_OK;
}
/**
* netdev_increment_features - increment feature set by one
* @all: current feature set
* @one: new feature set
* @mask: mask feature set
*
* Computes a new feature set after adding a device with feature set
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
netdev_features_t netdev_increment_features(netdev_features_t all,
netdev_features_t one, netdev_features_t mask)
{
if (mask & NETIF_F_GEN_CSUM)
mask |= NETIF_F_ALL_CSUM;
mask |= NETIF_F_VLAN_CHALLENGED;
all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
all &= one | ~NETIF_F_ALL_FOR_ALL;
/* If one device supports hw checksumming, set for all. */
if (all & NETIF_F_GEN_CSUM)
all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
return all;
}
EXPORT_SYMBOL(netdev_increment_features);
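/*
 * Example (illustrative sketch in the spirit of bond_compute_features();
 * the slave list and MASTER_FEATURE_MASK are assumptions): a master
 * device starts from the full mask and folds in each slave's features.
 *
 *	netdev_features_t all = MASTER_FEATURE_MASK;
 *
 *	list_for_each_entry(slave, &master_slave_list, list)
 *		all = netdev_increment_features(all, slave->dev->features,
 *						MASTER_FEATURE_MASK);
 */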
static struct hlist_head *netdev_create_hash(void)
{
int i;
struct hlist_head *hash;
hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
if (hash != NULL)
for (i = 0; i < NETDEV_HASHENTRIES; i++)
INIT_HLIST_HEAD(&hash[i]);
return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
INIT_LIST_HEAD(&net->dev_base_head);
net->dev_name_head = netdev_create_hash();
if (net->dev_name_head == NULL)
goto err_name;
net->dev_index_head = netdev_create_hash();
if (net->dev_index_head == NULL)
goto err_idx;
return 0;
err_idx:
kfree(net->dev_name_head);
err_name:
return -ENOMEM;
}
/**
* netdev_drivername - network driver for the device
* @dev: network device
*
* Determine network driver for device.
*/
const char *netdev_drivername(const struct net_device *dev)
{
const struct device_driver *driver;
const struct device *parent;
const char *empty = "";
parent = dev->dev.parent;
if (!parent)
return empty;
driver = parent->driver;
if (driver && driver->name)
return driver->name;
return empty;
}
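/*
 * Example: this is what lets the transmit watchdog in sch_generic.c name
 * the offending driver in its timeout message, roughly:
 *
 *	WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue "
 *		  "%u timed out\n", dev->name, netdev_drivername(dev), i);
 */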
int __netdev_printk(const char *level, const struct net_device *dev,
struct va_format *vaf)
{
int r;
if (dev && dev->dev.parent)
r = dev_printk(level, dev->dev.parent, "%s: %pV",
netdev_name(dev), vaf);
else if (dev)
r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
else
r = printk("%s(NULL net_device): %pV", level, vaf);
return r;
}
EXPORT_SYMBOL(__netdev_printk);
int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
{
struct va_format vaf;
va_list args;
int r;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
r = __netdev_printk(level, dev, &vaf);
va_end(args);
return r;
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level) \
int func(const struct net_device *dev, const char *fmt, ...) \
{ \
int r; \
struct va_format vaf; \
va_list args; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
r = __netdev_printk(level, dev, &vaf); \
va_end(args); \
\
return r; \
} \
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
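/*
 * Example (values are illustrative): the per-level helpers generated
 * above prefix each message with the driver and device name via
 * __netdev_printk().
 *
 *	netdev_info(dev, "link up, %u Mbps %s duplex\n",
 *		    speed, full_duplex ? "full" : "half");
 *	netdev_err(dev, "failed to map TX DMA buffer\n");
 */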
static void __net_exit netdev_exit(struct net *net)
{
kfree(net->dev_name_head);
kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
.init = netdev_init,
.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
struct net_device *dev, *aux;
/*
* Push all migratable network devices back to the
* initial network namespace
*/
rtnl_lock();
for_each_netdev_safe(net, dev, aux) {
int err;
char fb_name[IFNAMSIZ];
/* Ignore unmoveable devices (i.e. loopback) */
if (dev->features & NETIF_F_NETNS_LOCAL)
continue;
/* Leave virtual devices for the generic cleanup */
if (dev->rtnl_link_ops)
continue;
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
__func__, dev->name, err);
BUG();
}
}
rtnl_unlock();
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
/* At exit all network devices must be removed from a network
* namespace. Do this in the reverse order of registration.
* Do this across as many network namespaces as possible to
* improve batching efficiency.
*/
struct net_device *dev;
struct net *net;
LIST_HEAD(dev_kill_list);
rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
for_each_netdev_reverse(net, dev) {
if (dev->rtnl_link_ops)
dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
else
unregister_netdevice_queue(dev, &dev_kill_list);
}
}
unregister_netdevice_many(&dev_kill_list);
list_del(&dev_kill_list);
rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
.exit = default_device_exit,
.exit_batch = default_device_exit_batch,
};
/*
* Initialize the DEV module. At boot time this walks the device list and
* unhooks any devices that fail to initialise (normally hardware not
* present) and leaves us with a valid list of present and active devices.
*
*/
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
*/
static int __init net_dev_init(void)
{
int i, rc = -ENOMEM;
BUG_ON(!dev_boot_phase);
if (dev_proc_init())
goto out;
if (netdev_kobject_init())
goto out;
INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
if (register_pernet_subsys(&netdev_net_ops))
goto out;
/*
* Initialise the packet receive queues.
*/
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
sd->completion_queue = NULL;
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue = NULL;
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
sd->csd.func = rps_trigger_softirq;
sd->csd.info = sd;
sd->csd.flags = 0;
sd->cpu = i;
#endif
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
sd->backlog.gro_list = NULL;
sd->backlog.gro_count = 0;
}
dev_boot_phase = 0;
/* The loopback device is special: if any other network device
* is present in a network namespace, the loopback device must
* be present too. Since we now dynamically allocate and free the
* loopback device, ensure this invariant is maintained by
* keeping the loopback device as the first device on the
* list of network devices, so that it is the first device that
* appears and the last network device that disappears.
*/
if (register_pernet_device(&loopback_net_ops))
goto out;
if (register_pernet_device(&default_device_ops))
goto out;
open_softirq(NET_TX_SOFTIRQ, net_tx_action);
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
hotcpu_notifier(dev_cpu_callback, 0);
dst_init();
dev_mcast_init();
rc = 0;
out:
return rc;
}
subsys_initcall(net_dev_init);
static int __init initialize_hashrnd(void)
{
get_random_bytes(&hashrnd, sizeof(hashrnd));
return 0;
}
late_initcall_sync(initialize_hashrnd);