Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
+1 -1
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                       const unsigned char *addr);
 
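This hunk is the first of many in this merge that replace the deprecated kmem_cache_t typedef with the underlying struct kmem_cache. A minimal before/after sketch (the cache and variable names here are illustrative, not taken from the patch); the conversion is purely textual and allocation calls are unchanged:

    /* before: slab cache handle via the old typedef */
    static kmem_cache_t *example_cache __read_mostly;

    /* after: the same handle, spelled as the struct it always was */
    static struct kmem_cache *example_cache __read_mostly;

    /* callers look identical either way */
    void *obj = kmem_cache_alloc(example_cache, GFP_ATOMIC);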
@@ -34,6 +34,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_arp.h>
 #include <linux/in_route.h>
+#include <linux/inetdevice.h>
 
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -221,10 +222,14 @@ static void __br_dnat_complain(void)
  *
  * Otherwise, the packet is considered to be routed and we just
  * change the destination MAC address so that the packet will
- * later be passed up to the IP stack to be routed.
+ * later be passed up to the IP stack to be routed. For a redirected
+ * packet, ip_route_input() will give back the localhost as output device,
+ * which differs from the bridge device.
  *
  * Let us now consider the case that ip_route_input() fails:
  *
  * This can be because the destination address is martian, in which case
  * the packet will be dropped.
+ * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
+ * will fail, while __ip_route_output_key() will return success. The source
+ * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
@@ -237,7 +242,8 @@ static void __br_dnat_complain(void)
  *
  * --Lennert, 20020411
  * --Bart, 20020416 (updated)
- * --Bart, 20021007 (updated) */
+ * --Bart, 20021007 (updated)
+ * --Bart, 20062711 (updated) */
 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 {
         if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -264,15 +270,15 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
         struct net_device *dev = skb->dev;
         struct iphdr *iph = skb->nh.iph;
         struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+        int err;
 
         if (nf_bridge->mask & BRNF_PKT_TYPE) {
                 skb->pkt_type = PACKET_OTHERHOST;
                 nf_bridge->mask ^= BRNF_PKT_TYPE;
         }
         nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 
         if (dnat_took_place(skb)) {
-                if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
+                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                         struct rtable *rt;
                         struct flowi fl = {
                                 .nl_u = {
@@ -283,19 +289,33 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
                                 },
                                 .proto = 0,
                         };
+                        struct in_device *in_dev = in_dev_get(dev);
 
+                        /* If err equals -EHOSTUNREACH the error is due to a
+                         * martian destination or due to the fact that
+                         * forwarding is disabled. For most martian packets,
+                         * ip_route_output_key() will fail. It won't fail for 2 types of
+                         * martian destinations: loopback destinations and destination
+                         * 0.0.0.0. In both cases the packet will be dropped because the
+                         * destination is the loopback device and not the bridge. */
+                        if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
+                                goto free_skb;
+
                         if (!ip_route_output_key(&rt, &fl)) {
                                 /* - Bridged-and-DNAT'ed traffic doesn't
-                                 *   require ip_forwarding.
-                                 * - Deal with redirected traffic. */
-                                if (((struct dst_entry *)rt)->dev == dev ||
-                                    rt->rt_type == RTN_LOCAL) {
+                                 *   require ip_forwarding. */
+                                if (((struct dst_entry *)rt)->dev == dev) {
                                         skb->dst = (struct dst_entry *)rt;
                                         goto bridged_dnat;
                                 }
                                 /* we are sure that forwarding is disabled, so printing
                                  * this message is no problem. Note that the packet could
                                  * still have a martian destination address, in which case
                                  * the packet could be dropped even if forwarding were enabled */
                                 __br_dnat_complain();
                                 dst_release((struct dst_entry *)rt);
                         }
+free_skb:
                         kfree_skb(skb);
                         return 0;
                 } else {
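The comment block above carries the reasoning of this change: when ip_route_input() fails for DNAT'ed bridged traffic, only the -EHOSTUNREACH case (martian destination, or forwarding disabled on the in-device) justifies a second lookup, and the output route is kept only if it still leaves through the bridge device. A condensed, hypothetical sketch of that decision, not the kernel function itself (pass_up_to_ip_stack() and resend_on_bridge() are illustrative stand-ins, and the in_dev checks are elided):

    err = ip_route_input(skb, daddr, saddr, tos, bridge_dev);
    if (err == 0)
            return pass_up_to_ip_stack(skb);     /* normally routed */
    if (err != -EHOSTUNREACH)
            goto drop;                           /* genuine routing failure */
    if (ip_route_output_key(&rt, &fl) == 0 &&
        ((struct dst_entry *)rt)->dev == bridge_dev) {
            skb->dst = (struct dst_entry *)rt;   /* bridged-and-DNAT'ed: */
            return resend_on_bridge(skb);        /* back out the bridge */
    }
    drop:
            kfree_skb(skb);
            return 0;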
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
+1 -1
@@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops)
                 if (ops->gc())
                         return NULL;
         }
-        dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+        dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
         if (!dst)
                 return NULL;
         memset(dst, 0, ops->entry_size);
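Here and in the later DCCP and DECnet hunks, SLAB_ATOMIC and SLAB_KERNEL give way to GFP_ATOMIC and GFP_KERNEL. To the best of my recollection the SLAB_* names were by this point plain aliases of the corresponding GFP_* flags in <linux/slab.h>, making this a rename with no behavioural change; roughly:

    /* legacy aliases, sketched from memory */
    #define SLAB_ATOMIC   GFP_ATOMIC
    #define SLAB_KERNEL   GFP_KERNEL

    /* so these two calls were already equivalent */
    dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
    dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);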
+2 -4
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
         if (flow_count(cpu) > flow_hwm)
                 flow_cache_shrink(cpu);
 
-        fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+        fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
         if (fle) {
                 fle->next = *head;
                 *head = fle;
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
         tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
                           unsigned long action,
                           void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
         __flow_cache_shrink((unsigned long)hcpu, 0);
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
@@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
                 goto out_entries;
         }
 
-        n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+        n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
         if (!n)
                 goto out_entries;
 
+11 -9
@@ -68,8 +68,8 @@
 
 #include "kmap_skb.h"
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  * Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  * @gfp_mask: allocation mask
  * @fclone: allocate from fclone cache instead of head cache
  *          and allocate a cloned (child) skb
+ * @node: numa node to allocate memory on
  *
  * Allocate a new &sk_buff. The returned buffer has no headroom and a
  * tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug);
  * %GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-                            int fclone)
+                            int fclone, int node)
 {
-        kmem_cache_t *cache;
+        struct kmem_cache *cache;
         struct skb_shared_info *shinfo;
         struct sk_buff *skb;
         u8 *data;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
         /* Get the HEAD */
-        skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
         if (!skb)
                 goto out;
 
         /* Get the DATA. Size must match skb_add_mtu(). */
         size = SKB_DATA_ALIGN(size);
-        data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-                                    gfp_mask);
+        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+                                         gfp_mask, node);
         if (!data)
                 goto nodata;
 
@@ -210,7 +211,7 @@ nodata:
  * Buffers may only be allocated from interrupts using a @gfp_mask of
  * %GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
                                      unsigned int size,
                                      gfp_t gfp_mask)
 {
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                 unsigned int length, gfp_t gfp_mask)
 {
+        int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
         struct sk_buff *skb;
 
-        skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
         if (likely(skb)) {
                 skb_reserve(skb, NET_SKB_PAD);
                 skb->dev = dev;
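The signature and doc-comment changes above make skb allocation NUMA-aware: __alloc_skb() gains a node argument, allocates the head via kmem_cache_alloc_node() and the data via kmalloc_node_track_caller(), and __netdev_alloc_skb() derives the node from the device. A plausible view of how existing callers keep working (this alloc_skb() wrapper is a sketch of the assumed compatibility shim; -1 conventionally means "no node preference"):

    /* assumed wrapper: old callers keep the two-argument interface */
    static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority)
    {
            return __alloc_skb(size, priority, 0, -1);  /* no fclone, any node */
    }

    /* a driver refilling its rx ring gets buffers near the device */
    skb = __netdev_alloc_skb(dev, rx_buf_len, GFP_ATOMIC);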
+6 -19
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-        spin_lock_init(&sk->sk_lock.slock);
-        sk->sk_lock.owner = NULL;
-        init_waitqueue_head(&sk->sk_lock.wq);
-        /*
-         * Make sure we are not reinitializing a held lock:
-         */
-        debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-        /*
-         * Mark both the sk_lock and the sk_lock.slock as a
-         * per-address-family lock class:
-         */
-        lockdep_set_class_and_name(&sk->sk_lock.slock,
-                                   af_family_slock_keys + sk->sk_family,
-                                   af_family_slock_key_strings[sk->sk_family]);
-        lockdep_init_map(&sk->sk_lock.dep_map,
-                         af_family_key_strings[sk->sk_family],
-                         af_family_keys + sk->sk_family, 0);
+        sock_lock_init_class_and_name(sk,
+                        af_family_slock_key_strings[sk->sk_family],
+                        af_family_slock_keys + sk->sk_family,
+                        af_family_key_strings[sk->sk_family],
+                        af_family_keys + sk->sk_family);
 }
 
 /**
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
                       struct proto *prot, int zero_it)
 {
         struct sock *sk = NULL;
-        kmem_cache_t *slab = prot->slab;
+        struct kmem_cache *slab = prot->slab;
 
         if (slab != NULL)
                 sk = kmem_cache_alloc(slab, priority);
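The open-coded lockdep setup deleted above is folded into one helper, and the removed comments ("make sure we are not reinitializing a held lock", "per-address-family lock class") describe exactly what that helper must still do. A sketch of what sock_lock_init_class_and_name() plausibly expands to, reconstructed from the removed lines rather than quoted from the tree:

    /* sketch: slock init plus per-family lockdep annotation in one place */
    #define sock_lock_init_class_and_name(sk, sname, skey, name, key)      \
    do {                                                                   \
            sk->sk_lock.owner = NULL;                                      \
            init_waitqueue_head(&sk->sk_lock.wq);                          \
            spin_lock_init(&(sk)->sk_lock.slock);                          \
            /* must not reinitialize a lock someone still holds */         \
            debug_check_no_locks_freed((void *)&(sk)->sk_lock,             \
                                       sizeof((sk)->sk_lock));             \
            /* one lockdep class per address family, for both locks */     \
            lockdep_set_class_and_name(&(sk)->sk_lock.slock,               \
                                       (skey), (sname));                   \
            lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);    \
    } while (0)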
+2 -2
@@ -2130,7 +2130,7 @@ int iw_handler_set_spy(struct net_device * dev,
          * The rtnl_lock() make sure we don't race with the other iw_handlers.
          * This make sure wireless_spy_update() "see" that the spy list
          * is temporarily disabled. */
-        wmb();
+        smp_wmb();
 
         /* Are there are addresses to copy? */
         if(wrqu->data.length > 0) {
@@ -2159,7 +2159,7 @@ int iw_handler_set_spy(struct net_device * dev,
         }
 
         /* Make sure above is updated before re-enabling */
-        wmb();
+        smp_wmb();
 
         /* Enable addresses */
         spydata->spy_number = wrqu->data.length;
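wmb() orders stores against all observers, including devices, and can be expensive, while smp_wmb() compiles to nothing on uniprocessor kernels; since the spy list is only contended between CPUs, the cheaper barrier is sufficient here. The protected idiom is the classic disable/update/re-enable sequence; a minimal sketch of the pairing (the reader side and helper names are assumed for illustration):

    /* writer, as in the hunks above */
    spydata->spy_number = 0;            /* disable the list */
    smp_wmb();                          /* disable visible before update */
    copy_in_new_addresses(spydata);
    smp_wmb();                          /* update visible before re-enable */
    spydata->spy_number = n;

    /* a reader such as wireless_spy_update() would pair with smp_rmb() */
    n = spydata->spy_number;
    smp_rmb();
    scan_addresses(spydata, n);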
+2 -2
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
+3 -3
@@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void)
 #define ccids_read_unlock() do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-        kmem_cache_t *slab;
+        struct kmem_cache *slab;
         char slab_name_fmt[32], *slab_name;
         va_list args;
 
@@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
         return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
         if (slab != NULL) {
                 const char *name = kmem_cache_name(slab);
+2 -2
@@ -27,9 +27,9 @@ struct ccid_operations {
         unsigned char ccid_id;
         const char *ccid_name;
         struct module *ccid_owner;
-        kmem_cache_t *ccid_hc_rx_slab;
+        struct kmem_cache *ccid_hc_rx_slab;
         __u32 ccid_hc_rx_obj_size;
-        kmem_cache_t *ccid_hc_tx_slab;
+        struct kmem_cache *ccid_hc_tx_slab;
         __u32 ccid_hc_tx_obj_size;
         int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
         int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
@@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
         new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
         if (new_packet == NULL || new_packet->dccphtx_sent) {
                 new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-                                                    SLAB_ATOMIC);
+                                                    GFP_ATOMIC);
 
                 if (unlikely(new_packet == NULL)) {
                         DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
                 /* new loss event detected */
                 /* calculate last interval length */
                 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-                entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+                entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
                 if (entry == NULL) {
                         DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
         }
 
         packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-                                        skb, SLAB_ATOMIC);
+                                        skb, GFP_ATOMIC);
         if (unlikely(packet == NULL)) {
                 DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
                           "to history, consider it lost!\n", dccp_role(sk), sk);
@@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
         int i;
 
         for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-                entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+                entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
                 if (entry == NULL) {
                         dccp_li_hist_purge(hist, list);
                         DCCP_BUG("loss interval list entry is NULL");
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-        kmem_cache_t *dccplih_slab;
+        struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
@@ -68,14 +68,14 @@ struct dccp_rx_hist_entry {
 };
 
 struct dccp_tx_hist {
-        kmem_cache_t *dccptxh_slab;
+        struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-        kmem_cache_t *dccprxh_slab;
+        struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@ create:
 
 replace:
         err = -ENOBUFS;
-        new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+        new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
         if (new_f == NULL)
                 goto out;
@@ -431,6 +431,17 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
         return 0;
 }
 
+void
+ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&mac->lock, flags);
+        mac->associnfo.associating = 1;
+        schedule_work(&mac->associnfo.work);
+        spin_unlock_irqrestore(&mac->lock, flags);
+}
+
 int
 ieee80211softmac_handle_disassoc(struct net_device * dev,
                                  struct ieee80211_disassoc *disassoc)
@@ -449,8 +460,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
         dprintk(KERN_INFO PFX "got disassoc frame\n");
         ieee80211softmac_disassoc(mac);
 
-        /* try to reassociate */
-        schedule_delayed_work(&mac->associnfo.work, 0);
+        ieee80211softmac_try_reassoc(mac);
 
         return 0;
 }
@@ -337,6 +337,8 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
         /* can't transmit data right now... */
         netif_carrier_off(mac->dev);
         spin_unlock_irqrestore(&mac->lock, flags);
+
+        ieee80211softmac_try_reassoc(mac);
 }
 
 /*
@@ -239,4 +239,6 @@ void ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, in
 int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
         int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask);
 
+void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac);
+
 #endif /* IEEE80211SOFTMAC_PRIV_H_ */
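The new ieee80211softmac_try_reassoc() centralizes the "kick off another association attempt" step: set associnfo.associating under mac->lock, then schedule the association work item. The disassoc and deauth paths above now share that one sequence instead of a bare schedule_delayed_work(). A hedged usage sketch:

    /* any path that loses the association can now simply do: */
    ieee80211softmac_disassoc(mac);
    ieee80211softmac_try_reassoc(mac);  /* flag + schedule, under mac->lock */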
Some files were not shown because too many files have changed in this diff.