Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (51 commits)
  netfilter: ipset: Fix the order of listing of sets
  ip6_pol_route panic: Do not allow VLAN on loopback
  bnx2x: Fix port identification problem
  r8169: add Realtek as maintainer.
  ip: ip_options_compile() resilient to NULL skb route
  bna: fix memory leak during RX path cleanup
  bna: fix for clean fw re-initialization
  usbnet: Fix up 'FLAG_POINTTOPOINT' and 'FLAG_MULTI_PACKET' overlaps.
  iwlegacy: fix tx_power initialization
  Revert "tcp: disallow bind() to reuse addr/port"
  qlcnic: limit skb frags for non tso packet
  net: can: mscan: fix build breakage in mpc5xxx_can
  netfilter: ipset: set match and SET target fixes
  netfilter: ipset: bitmap:ip,mac type requires "src" for MAC
  sctp: fix oops while removed transport still using as retran path
  sctp: fix oops when updating retransmit path with DEBUG on
  net: Disable NETIF_F_TSO_ECN when TSO is disabled
  net: Disable all TSO features when SG is disabled
  sfc: Use rmb() to ensure reads occur in order
  ieee802154: Remove hacked CFLAGS in net/ieee802154/Makefile
  ...
@@ -151,6 +151,7 @@ S:	Maintained
 F:	drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
 		cbq->callback(msg, nsp);
 		kfree_skb(skb);
+		cn_queue_release_callback(cbq);
 		err = 0;
 	}
 
 	return err;
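The added cn_queue_release_callback() call pairs the reference taken when the callback entry is looked up with a release after the callback runs; without it, each delivered message would leak one reference. A minimal sketch of the get/use/put pattern (simplified types, not the actual connector structures):

/* Sketch only: a refcounted callback entry; the lookup side is assumed
 * to have incremented refcnt before deliver() is called.
 */
struct callback_entry {
	atomic_t refcnt;
	void (*fn)(void *msg);
};

static void entry_put(struct callback_entry *cbq)
{
	if (atomic_dec_and_test(&cbq->refcnt))
		kfree(cbq);		/* last reference frees the entry */
}

static void deliver(struct callback_entry *cbq, void *msg)
{
	cbq->fn(msg);			/* reference from lookup keeps cbq alive */
	entry_put(cbq);			/* drop it, as cn_queue_release_callback() does */
}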
@@ -38,6 +38,8 @@
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)		\
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
-			if (bfa_ioc_sync_complete(ioc)) {
+			if (bfa_ioc_sync_start(ioc)) {
 				iocpf->retry_count = 0;
 				bfa_ioc_sync_join(ioc);
 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
 	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
 	if (fwhdr.signature != drv_fwhdr->signature)
 		return false;
 
-	if (fwhdr.exec != drv_fwhdr->exec)
+	if (swab32(fwhdr.param) != boot_env)
 		return false;
 
 	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 {
 	enum bfi_ioc_state ioc_fwstate;
 	bool fwvalid;
+	u32 boot_env;
 
 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
+	boot_env = BFI_BOOT_LOADER_OS;
+
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	 * check if firmware is valid
 	 */
 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-		false : bfa_ioc_fwver_valid(ioc);
+		false : bfa_ioc_fwver_valid(ioc, boot_env);
 
 	if (!fwvalid) {
-		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 		return;
 	}
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	/**
 	 * Initialize the h/w for any other states.
 	 */
-	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }
 
 void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-		    u32 boot_param)
+		    u32 boot_env)
 {
 	u32 *fwimg;
 	u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 	/*
 	 * Set boot type and boot param at the end.
 	 */
-	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
 			+ (BFI_BOOT_TYPE_OFF)));
-	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-			+ (BFI_BOOT_PARAM_OFF)));
+	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+			+ (BFI_BOOT_LOADER_OFF)));
 }
 
 static void
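The dropped double byte-swap was a no-op: swab32() is an involution, so swab32(swab32(x)) == x for any x, and writel() already performs the CPU-to-little-endian conversion the hardware expects. A standalone demonstration (plain C, outside the kernel):

#include <assert.h>
#include <stdint.h>

/* Userspace re-implementation of the kernel's swab32() for the demo. */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

int main(void)
{
	uint32_t v = 0x12345678U;

	assert(swab32(v) == 0x78563412U);	/* single swap reverses the bytes */
	assert(swab32(swab32(v)) == v);		/* double swap is the identity */
	return 0;
}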
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
 	void __iomem *rb;
 
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	rb = ioc->pcidev.pci_bar_kva;
-	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
 	} else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
 	}
 
 	bfa_ioc_msgflush(ioc);
-	bfa_ioc_download_fw(ioc, boot_type, boot_param);
+	bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
 	/**
 	 * Enable interrupts just before starting LPU
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
 					bool msix);
 	void	(*ioc_notify_fail)	(struct bfa_ioc *ioc);
 	void	(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
+	bool	(*ioc_sync_start)	(struct bfa_ioc *ioc);
 	void	(*ioc_sync_join)	(struct bfa_ioc *ioc);
 	void	(*ioc_sync_leave)	(struct bfa_ioc *ioc);
 	void	(*ioc_sync_ack)		(struct bfa_ioc *ioc);
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
 	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
 	nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
 	nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
 	nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
 	nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -342,6 +344,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 	bfa_nw_ioc_hw_sem_release(ioc);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+	/*
+	 * Driver load time. If the sync required bit for this PCI fn
+	 * is set, it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+
+	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
+		writel(1, ioc->ioc_regs.ioc_usage_reg);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return true;
+	}
+
+	return bfa_ioc_ct_sync_complete(ioc);
+}
+
 /**
  * Synchronized IOC failure processing routines
  */
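The new hook relies on a shared "fail sync" register in which each PCI function owns a bit: a bit left set means that function's driver exited uncleanly, and whichever function loads first resets the IOC state for everyone. Schematically (hypothetical helper, simplified from the bfa_ioc_ct_get_sync_reqd() and bfa_ioc_ct_sync_pos() accessors used above):

/* Schematic only: one "sync required" bit per PCI function packed into
 * a shared register; the real driver keeps more state in ioc_fail_sync.
 */
#define SYNC_BIT(fn)	(1u << (fn))

static int needs_cleanup(u32 sync_reqd, unsigned int my_fn)
{
	/* a leftover bit for this function means an unclean previous exit */
	return (sync_reqd & SYNC_BIT(my_fn)) != 0;
}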
@@ -184,12 +184,14 @@ enum bfi_mclass {
 #define BFI_IOC_MSGLEN_MAX	32	/* 32 bytes */
 
 #define BFI_BOOT_TYPE_OFF	8
-#define BFI_BOOT_PARAM_OFF	12
+#define BFI_BOOT_LOADER_OFF	12
 
-#define BFI_BOOT_TYPE_NORMAL	0	/* param is device id */
+#define BFI_BOOT_TYPE_NORMAL	0
 #define BFI_BOOT_TYPE_FLASH	1
 #define BFI_BOOT_TYPE_MEMTEST	2
 
+#define BFI_BOOT_LOADER_OS	0
+
 #define BFI_BOOT_MEMTEST_RES_ADDR	0x900
 #define BFI_BOOT_MEMTEST_RES_SIG	0xA0A1A2A3
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
-	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
+	rx_cbfn.rcb_destroy_cbfn = NULL;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
 	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
 	for (i = 0; i < (data * 2); i++) {
 		if ((i % 2) == 0)
 			bnx2x_set_led(&bp->link_params, &bp->link_vars,
-				      LED_MODE_OPER, SPEED_1000);
+				      LED_MODE_ON, SPEED_1000);
 		else
 			bnx2x_set_led(&bp->link_params, &bp->link_vars,
-				      LED_MODE_OFF, 0);
+				      LED_MODE_FRONT_PANEL_OFF, 0);
 
 		msleep_interruptible(500);
 		if (signal_pending(current))
 			break;
 	}
 
 	if (bp->link_vars.link_up)
-		bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
-			      bp->link_vars.line_speed);
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_OPER, bp->link_vars.line_speed);
 
 	return 0;
 }
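The loop itself is the stock ethtool phys_id pattern: toggle the LED at a human-visible rate with an interruptible sleep, and bail out as soon as the user aborts. Schematically (hypothetical set_led() helper):

/* Sketch of the identify-blink loop used above. */
for (i = 0; i < (data * 2); i++) {
	set_led(bp, (i % 2) == 0);	/* hypothetical on/off helper */
	msleep_interruptible(500);	/* returns early if a signal arrives */
	if (signal_pending(current))
		break;			/* user interrupted 'ethtool -p' */
}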
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
 	bond_info->tx_hashtbl = new_hashtbl;
 
 	for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
-		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
 	}
 
 	_unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 	 */
 	rlb_choose_channel(skb, bond);
 
-	/* The ARP relpy packets must be delayed so that
+	/* The ARP reply packets must be delayed so that
 	 * they can cancel out the influence of the ARP request.
 	 */
 	bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  *
  * If the permanent hw address of @slave is @bond's hw address, we need to
  * find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
  * addresses of the other slaves.
  *
  * We go over the slave list, and for each slave there we compare its
@@ -75,7 +75,7 @@ struct tlb_client_info {
 				 * gave this entry index.
 				 */
 	u32 tx_bytes;		/* Each Client accumulates the BytesTx that
-				 * were tranmitted to it, and after each
+				 * were transmitted to it, and after each
 				 * CallBack the LoadHistory is divided
 				 * by the balance interval
 				 */
@@ -122,7 +122,6 @@ struct tlb_slave_info {
 };
 
 struct alb_bond_info {
 	struct timer_list	alb_timer;
 	struct tlb_client_info	*tx_hashtbl; /* Dynamically allocated */
-	spinlock_t		tx_hashtbl_lock;
 	u32			unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
 	struct slave		*next_rx_slave;/* next slave to be assigned
 						* to a new rx client for
 						*/
-	u32			rlb_interval_counter;
 	u8			primary_is_promisc;	/* boolean */
 	u32			rlb_promisc_timeout_counter;/* counts primary
 						 * promiscuity time
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
 
 	if (!ofdev->dev.of_match)
 		return -EINVAL;
-	data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+	data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
 
 	base = of_iomap(np, 0);
 	if (!base) {
@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
 		| NETIF_F_RXCSUM
 		| NETIF_F_HIGHDMA
 		| NETIF_F_LLTX
-		| NETIF_F_NETNS_LOCAL;
+		| NETIF_F_NETNS_LOCAL
+		| NETIF_F_VLAN_CHALLENGED;
 	dev->ethtool_ops	= &loopback_ethtool_ops;
 	dev->header_ops		= &eth_header_ops;
 	dev->netdev_ops		= &loopback_ops;
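NETIF_F_VLAN_CHALLENGED is honoured by the 8021q registration path, which refuses to stack a VLAN on any device carrying the flag, so the ip6_pol_route panic path can no longer be reached. Roughly (paraphrasing the vlan_check_real_dev() test; not part of this diff):

/* Approximate check from the VLAN registration path. */
if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
	pr_info("VLANs not supported on %s\n", real_dev->name);
	return -EOPNOTSUPP;
}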
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 		prev_eedata = eedata;
 	}
 
+	/* Store MAC Address in perm_addr */
+	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
 	dev->base_addr = (unsigned long __force) ioaddr;
 	dev->irq = irq;
 
@@ -174,7 +174,7 @@
 
 #define MAX_NUM_CARDS		4
 
-#define MAX_BUFFERS_PER_CMD	32
+#define NETXEN_MAX_FRAGS_PER_TX	14
 #define MAX_TSO_HEADER_DESC	2
 #define MGMT_CMD_DESC_RESV	4
 #define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
  */
 struct netxen_cmd_buffer {
 	struct sk_buff *skb;
-	struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+	struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
 	u32 frag_count;
 };
 
@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct cmd_desc_type0 *hwdesc, *first_desc;
 	struct pci_dev *pdev;
 	int i, k;
+	int delta = 0;
+	struct skb_frag_struct *frag;
 
 	u32 producer;
 	int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
+	/* 14 frags supported for normal packet and
+	 * 32 frags supported for TSO packet
+	 */
+	if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+		for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+			frag = &skb_shinfo(skb)->frags[i];
+			delta += frag->size;
+		}
+
+		if (!__pskb_pull_tail(skb, delta))
+			goto drop_packet;
+
+		frag_count = 1 + skb_shinfo(skb)->nr_frags;
+	}
 	/* 4 fragments per cmd des */
 	no_of_desc = (frag_count + 3) >> 2;
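__pskb_pull_tail() copies delta bytes out of the leading page fragments into the skb's linear area, shrinking nr_frags until the packet fits the 14-fragment hardware limit; GSO packets are exempt because the firmware accepts up to 32 fragments for TSO. The byte-count logic in isolation (userspace analogue with a hypothetical struct frag; the kernel walks skb_shinfo(skb)->frags):

#include <stddef.h>

#define MAX_HW_FRAGS 14

struct frag { size_t size; };

/* Bytes to pull into the head so at most MAX_HW_FRAGS fragments remain;
 * nfrags counts the linear head as one fragment, matching frag_count
 * in the patch above.
 */
static size_t bytes_to_pull(const struct frag *frags, size_t nfrags)
{
	size_t delta = 0;
	size_t i;

	if (nfrags <= MAX_HW_FRAGS)
		return 0;			/* already within the limit */

	for (i = 0; i < nfrags - MAX_HW_FRAGS; i++)
		delta += frags[i].size;		/* absorb the leading frags */
	return delta;
}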
@@ -99,6 +99,7 @@
 #define TX_UDPV6_PKT	0x0c
 
 /* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX	14
 #define MAX_TSO_HEADER_DESC	2
 #define MGMT_CMD_DESC_RESV	4
 #define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct cmd_desc_type0 *hwdesc, *first_desc;
 	struct pci_dev *pdev;
 	struct ethhdr *phdr;
+	int delta = 0;
 	int i, k;
 
 	u32 producer;
u32 producer;
|
||||
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
}
|
||||
|
||||
frag_count = skb_shinfo(skb)->nr_frags + 1;
|
||||
/* 14 frags supported for normal packet and
|
||||
* 32 frags supported for TSO packet
|
||||
*/
|
||||
if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
|
||||
|
||||
for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
|
||||
delta += skb_shinfo(skb)->frags[i].size;
|
||||
|
||||
if (!__pskb_pull_tail(skb, delta))
|
||||
goto drop_packet;
|
||||
|
||||
frag_count = 1 + skb_shinfo(skb)->nr_frags;
|
||||
}
|
||||
|
||||
/* 4 fragments per cmd des */
|
||||
no_of_desc = (frag_count + 3) >> 2;
|
||||
|
||||
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 
 	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx))
+	if (efx_dev_registered(efx) && !efx->port_inhibited)
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
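Both rmb() calls enforce that the 32-bit sub-reads of a single wide register are issued in address order; per the commit message the hardware requires the reads to occur in order, so a compiler- or CPU-reordered read could return a torn snapshot. The pattern in general form (sketch, assuming a device that latches the remaining words when the lowest word is read):

/* Sketch: paired reads of a 64-bit register whose high half is latched
 * by the read of the low half, so program order must be preserved.
 */
static u64 read_latched_qword(void __iomem *reg)
{
	u32 lo, hi;

	lo = readl(reg);	/* this read latches the high word */
	rmb();			/* forbid reordering of the two reads */
	hi = readl(reg + 4);

	return ((u64)hi << 32) | lo;
}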
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int magic_count;
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
Some files were not shown because too many files have changed in this diff.