/* include/linux/netpoll.h */
/*
* Common code for low-level network console, dump, and debugger code
*
* Derived from netconsole, kgdb-over-ethernet, and netdump patches
*/
#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
2013-01-07 20:52:39 +00:00
union inet_addr {
__u32 all[4];
__be32 ip;
__be32 ip6[4];
struct in_addr in;
struct in6_addr in6;
};
2005-04-16 15:20:36 -07:00
struct netpoll {
struct net_device *dev;
2006-10-26 15:46:56 -07:00
char dev_name[IFNAMSIZ];
const char *name;
2005-04-16 15:20:36 -07:00
void (*rx_hook)(struct netpoll *, int, char *, int);
2006-10-26 15:46:55 -07:00
2013-01-07 20:52:39 +00:00
union inet_addr local_ip, remote_ip;
bool ipv6;
2005-04-16 15:20:36 -07:00
u16 local_port, remote_port;
2007-11-19 19:23:29 -08:00
u8 remote_mac[ETH_ALEN];
struct list_head rx; /* rx_np list element */
struct work_struct cleanup_work;
2005-06-22 22:05:31 -07:00
};
struct netpoll_info {
2006-10-26 15:46:50 -07:00
atomic_t refcnt;
unsigned long rx_flags;
spinlock_t rx_lock;
struct mutex dev_lock;
struct list_head rx_np; /* netpolls that registered an rx_hook */
2013-01-07 20:52:39 +00:00
struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
2006-10-26 15:46:51 -07:00
struct sk_buff_head txq;
struct delayed_work tx_work;
struct netpoll *netpoll;
2012-08-10 01:24:38 +00:00
struct rcu_head rcu;
2005-04-16 15:20:36 -07:00
};
/*
 * Pause/resume netpoll RX processing on a device.  The static inline
 * stubs keep callers free of #ifdefs when CONFIG_NETPOLL is disabled.
 */
#ifdef CONFIG_NETPOLL
extern int netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline int netpoll_rx_disable(struct net_device *dev) { return 0; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif
/* Setup, option-parsing, transmit and teardown entry points. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
/*
 * Transmit @skb on the device bound to @np, with local interrupts
 * disabled across the send.
 */
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(irqflags);
}
#ifdef CONFIG_NETPOLL
/*
 * netpoll_rx_on - does @skb's device have active netpoll RX state?
 *
 * True when the device has npinfo with either a registered rx_hook
 * (non-empty rx_np list) or rx_flags set.  Uses rcu_dereference_bh(),
 * so the caller must hold an RCU-bh read-side section.
 */
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
static inline bool netpoll_rx(struct sk_buff *skb)
2005-04-16 15:20:36 -07:00
{
2010-06-10 16:12:44 +00:00
struct netpoll_info *npinfo;
unsigned long flags;
bool ret = false;
2005-06-22 22:05:31 -07:00
local_irq_save(flags);
2010-06-10 16:12:44 +00:00
if (!netpoll_rx_on(skb))
2010-06-10 16:12:44 +00:00
goto out;
2005-06-22 22:05:31 -07:00
npinfo = rcu_dereference_bh(skb->dev->npinfo);
spin_lock(&npinfo->rx_lock);
/* check rx_flags again with the lock held */
if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
ret = true;
spin_unlock(&npinfo->rx_lock);
2010-06-10 16:12:44 +00:00
out:
local_irq_restore(flags);
return ret;
2005-04-16 15:20:36 -07:00
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
2005-04-16 15:20:36 -07:00
{
if (!list_empty(&skb->dev->napi_list))
return netpoll_rx(skb);
return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
if (dev && dev->npinfo) {
spin_lock(&napi->poll_lock);
napi->poll_owner = smp_processor_id();
return napi;
2005-04-16 15:20:36 -07:00
}
2005-08-11 19:27:43 -07:00
return NULL;
2005-04-16 15:20:36 -07:00
}
2005-08-11 19:27:43 -07:00
static inline void netpoll_poll_unlock(void *have)
2005-04-16 15:20:36 -07:00
{
struct napi_struct *napi = have;
2005-08-11 19:27:43 -07:00
if (napi) {
napi->poll_owner = -1;
spin_unlock(&napi->poll_lock);
2005-04-16 15:20:36 -07:00
}
}
2012-08-10 01:24:46 +00:00
static inline bool netpoll_tx_running(struct net_device *dev)
2010-06-10 16:12:49 +00:00
{
return irqs_disabled();
}
2005-04-16 15:20:36 -07:00
#else
static inline bool netpoll_rx(struct sk_buff *skb)
{
2012-08-10 01:24:46 +00:00
return false;
}
2012-08-10 01:24:46 +00:00
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
2012-08-10 01:24:46 +00:00
return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
2012-08-10 01:24:46 +00:00
static inline bool netpoll_tx_running(struct net_device *dev)
2010-06-10 16:12:49 +00:00
{
2012-08-10 01:24:46 +00:00
return false;
2010-06-10 16:12:49 +00:00
}
2005-04-16 15:20:36 -07:00
#endif
#endif