Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:

	net/mac80211/mlme.c
David S. Miller
2008-08-29 23:06:00 -07:00
69 changed files with 642 additions and 470 deletions
+5
@@ -337,6 +337,11 @@ typedef struct scc_param {
 	uint	scc_tcrc;	/* Internal */
 } sccp_t;
 
+/* Function code bits.
+*/
+#define SCC_EB	((u_char) 0x10)	/* Set big endian byte order */
+#define SCC_GBL	((u_char) 0x20)	/* Snooping enabled */
+
 /* CPM Ethernet through SCC1.
  */
 typedef struct scc_enet {
+3 -3
@@ -822,14 +822,14 @@ config ULTRA32
 	  will be called smc-ultra32.
 
 config BFIN_MAC
-	tristate "Blackfin 527/536/537 on-chip mac support"
-	depends on NET_ETHERNET && (BF527 || BF537 || BF536)
+	tristate "Blackfin on-chip MAC support"
+	depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537)
 	select CRC32
 	select MII
 	select PHYLIB
 	select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
 	help
-	  This is the driver for blackfin on-chip mac device. Say Y if you want it
+	  This is the driver for Blackfin on-chip mac device. Say Y if you want it
 	  compiled into the kernel. This driver is also available as a module
 	  ( = code which can be inserted in and removed from the running kernel
 	  whenever you want). The module will be called bfin_mac.
+2 -1
@@ -2232,10 +2232,11 @@ static int atl1e_resume(struct pci_dev *pdev)
 
 	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
 
-	if (netif_running(netdev))
+	if (netif_running(netdev)) {
 		err = atl1e_request_irq(adapter);
 		if (err)
 			return err;
+	}
 
 	atl1e_reset_hw(&adapter->hw);
-1
@@ -3022,7 +3022,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	netdev->features = NETIF_F_HW_CSUM;
 	netdev->features |= NETIF_F_SG;
 	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
-	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_LLTX;
 
 	/*
+1 -1
@@ -271,7 +271,7 @@ struct bnx2x_fastpath {
 			    (fp->tx_pkt_prod != fp->tx_pkt_cons))
 
 #define BNX2X_HAS_RX_WORK(fp) \
-			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+			(fp->rx_comp_cons != rx_cons_sb)
 
 #define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
+115 -109
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.17"
-#define DRV_MODULE_RELDATE	"2008/08/13"
+#define DRV_MODULE_VERSION	"1.45.20"
+#define DRV_MODULE_RELDATE	"2008/08/25"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 second every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
@@ -2550,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
 
@@ -4605,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tpa = {0};
+
+		tpa.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tpa)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tpa)[1]);
+	}
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5337,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5363,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tmp = {0};
-
-		tmp.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tmp)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tmp)[1]);
-	}
-
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5531,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6055,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
@@ -6338,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_error;
+		goto load_int_disable;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6350,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_int_disable;
+			goto load_rings_free;
 		}
 	}
@@ -6360,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6369,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_stop_netif;
+		goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6382,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		for_each_nondefault_queue(bp, i) {
 			rc = bnx2x_setup_multi(bp, i);
 			if (rc)
-				goto load_stop_netif;
+				goto load_netif_stop;
 		}
 
 		if (CHIP_IS_E1(bp))
@@ -6427,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_stop_netif:
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-
-load_int_disable:
-	bnx2x_int_disable_sync(bp);
-
-	/* Release IRQs */
-	bnx2x_free_irq(bp);
-
+load_netif_stop:
+	bnx2x_napi_disable(bp);
+load_rings_free:
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+load_int_disable:
+	bnx2x_int_disable_sync(bp);
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
 load_error:
 	bnx2x_free_mem(bp);
@@ -6455,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6613,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6631,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 			while (BNX2X_HAS_TX_WORK(fp)) {
 
-				if (!netif_running(bp->dev))
-					bnx2x_tx_int(fp, 1000);
+				bnx2x_tx_int(fp, 1000);
 
 				if (!cnt) {
 					BNX2X_ERR("timeout waiting for queue[%d]\n",
 						  i);
@@ -6649,18 +6682,42 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 				smp_rmb();
 			}
 		}
 
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+						bnx2x_sp(bp, mcast_config);
+
+		bnx2x_set_mac_addr_e1(bp, 0);
+
+		for (i = 0; i < config->hdr.length_6b; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+
+		config->hdr.length_6b = i;
+		if (CHIP_REV_IS_SLOW(bp))
+			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+		else
+			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+		config->hdr.client_id = BP_CL_ID(bp);
+		config->hdr.reserved1 = 0;
+
+		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+
+	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+		bnx2x_set_mac_addr_e1h(bp, 0);
+
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+	}
+
 	if (unload_mode == UNLOAD_NORMAL)
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -6689,37 +6746,6 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	} else
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-	if (CHIP_IS_E1(bp)) {
-		struct mac_configuration_cmd *config =
-						bnx2x_sp(bp, mcast_config);
-
-		bnx2x_set_mac_addr_e1(bp, 0);
-
-		for (i = 0; i < config->hdr.length_6b; i++)
-			CAM_INVALIDATE(config->config_table[i]);
-
-		config->hdr.length_6b = i;
-		if (CHIP_REV_IS_SLOW(bp))
-			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
-		else
-			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
-		config->hdr.client_id = BP_CL_ID(bp);
-		config->hdr.reserved1 = 0;
-
-		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
-			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
-
-	} else { /* E1H */
-		bnx2x_set_mac_addr_e1h(bp, 0);
-
-		for (i = 0; i < MC_HASH_SIZE; i++)
-			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-	}
-
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
 	for_each_nondefault_queue(bp, i)
@@ -6821,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		 */
 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+		if (val == 0x7)
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 			/* save our func */
@@ -6898,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 					DRV_MSG_SEQ_NUMBER_MASK);
 		}
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
@@ -8617,34 +8646,6 @@ test_mem_exit:
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9250,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 						 napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
+	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9265,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9484,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(bp->fp) <
-		     (skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9548,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
-		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
+2 -2
@@ -1838,7 +1838,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 		if ((le16_to_cpu(rfd->command) & cb_el) &&
 		    (RU_RUNNING == nic->ru_running))
 
-			if (readb(&nic->csr->scb.status) & rus_no_res)
+			if (ioread8(&nic->csr->scb.status) & rus_no_res)
 				nic->ru_running = RU_SUSPENDED;
 		return -ENODATA;
 	}
@@ -1861,7 +1861,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 	if ((le16_to_cpu(rfd->command) & cb_el) &&
 	    (RU_RUNNING == nic->ru_running)) {
 
-		if (readb(&nic->csr->scb.status) & rus_no_res)
+		if (ioread8(&nic->csr->scb.status) & rus_no_res)
 			nic->ru_running = RU_SUSPENDED;
 	}
+2 -2
@@ -5522,7 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (id->driver_data & DEV_HAS_CHECKSUM) {
 		np->rx_csum = 1;
 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
-		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 		dev->features |= NETIF_F_TSO;
 	}
 
@@ -5835,7 +5835,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
 		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
-		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
+		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
 			"csum " : "",
 		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
 			"vlan " : "",
+8
@@ -792,6 +792,10 @@ static int fs_enet_open(struct net_device *dev)
 	int r;
 	int err;
 
+	/* to initialize the fep->cur_rx,... */
+	/* not doing this, will cause a crash in fs_enet_rx_napi */
+	fs_init_bds(fep->ndev);
+
 	if (fep->fpi->use_napi)
 		napi_enable(&fep->napi);
 
@@ -1167,6 +1171,10 @@ static struct of_device_id fs_enet_match[] = {
 		.compatible = "fsl,cpm1-scc-enet",
 		.data = (void *)&fs_scc_ops,
 	},
+	{
+		.compatible = "fsl,cpm2-scc-enet",
+		.data = (void *)&fs_scc_ops,
+	},
 #endif
 #ifdef CONFIG_FS_ENET_HAS_FCC
 	{
+7 -1
@@ -47,7 +47,6 @@
 #include "fs_enet.h"
 
 /*************************************************/
-
 #if defined(CONFIG_CPM1)
 /* for a 8xx __raw_xxx's are sufficient */
 #define __fs_out32(addr, x)	__raw_writel(x, addr)
@@ -62,6 +61,8 @@
 #define __fs_out16(addr, x)	out_be16(addr, x)
 #define __fs_in32(addr)	in_be32(addr)
 #define __fs_in16(addr)	in_be16(addr)
+#define __fs_out8(addr, x)	out_8(addr, x)
+#define __fs_in8(addr)		in_8(addr)
 #endif
 
 /* write, read, set bits, clear bits */
@@ -262,8 +263,13 @@ static void restart(struct net_device *dev)
 
 	/* Initialize function code registers for big-endian.
 	 */
+#ifndef CONFIG_NOT_COHERENT_CACHE
+	W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
+	W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
+#else
 	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
 	W8(ep, sen_genscc.scc_tfcr, SCC_EB);
+#endif
 
 	/* Set maximum bytes per receive buffer.
 	 * This appears to be an Ethernet frame size, not the buffer
+18 -4
@@ -105,6 +105,7 @@ const char gfar_driver_version[] = "1.3";
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
 struct sk_buff *gfar_new_skb(struct net_device *dev);
@@ -209,6 +210,7 @@ static int gfar_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->txlock);
 	spin_lock_init(&priv->rxlock);
 	spin_lock_init(&priv->bflock);
+	INIT_WORK(&priv->reset_task, gfar_reset_task);
 
 	platform_set_drvdata(pdev, dev);
@@ -1212,6 +1214,7 @@ static int gfar_close(struct net_device *dev)
 
 	napi_disable(&priv->napi);
 
+	cancel_work_sync(&priv->reset_task);
 	stop_gfar(dev);
 
 	/* Disconnect from the PHY */
@@ -1326,13 +1329,16 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-/* gfar_timeout gets called when a packet has not been
+/* gfar_reset_task gets scheduled when a packet has not been
  * transmitted after a set amount of time.
  * For now, assume that clearing out all the structures, and
- * starting over will fix the problem. */
-static void gfar_timeout(struct net_device *dev)
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
 {
-	dev->stats.tx_errors++;
+	struct gfar_private *priv = container_of(work, struct gfar_private,
+			reset_task);
+	struct net_device *dev = priv->dev;
 
 	if (dev->flags & IFF_UP) {
 		stop_gfar(dev);
@@ -1342,6 +1348,14 @@ static void gfar_timeout(struct net_device *dev)
 	netif_tx_schedule_all(dev);
 }
 
+static void gfar_timeout(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	dev->stats.tx_errors++;
+	schedule_work(&priv->reset_task);
+}
+
 /* Interrupt Handler for Transmit complete */
 static int gfar_clean_tx_ring(struct net_device *dev)
 {
+1
@@ -756,6 +756,7 @@ struct gfar_private {
 	uint32_t msg_enable;
 
+	struct work_struct reset_task;
 	/* Network Statistics */
 	struct gfar_extra_stats extra_stats;
 };
+3 -3
@@ -663,9 +663,6 @@ static int emac_configure(struct emac_instance *dev)
 	if (emac_phy_gpcs(dev->phy.mode))
 		emac_mii_reset_phy(&dev->phy);
 
-	/* Required for Pause packet support in EMAC */
-	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
-
 	return 0;
 }
 
@@ -1150,6 +1147,9 @@ static int emac_open(struct net_device *ndev)
 	} else
 		netif_carrier_on(dev->ndev);
 
+	/* Required for Pause packet support in EMAC */
+	dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
+
 	emac_configure(dev);
 	mal_poll_add(dev->mal, &dev->commac);
 	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
+3 -2
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
-				       skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			ibmveth_error_printk("tx: unable to map xmit buffer\n");
@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
 		used_bounce = 1;
+		wmb();
 	} else
 		desc.fields.address = data_dma_addr;
-1
@@ -87,7 +87,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	case E1000_DEV_ID_82576:
 	case E1000_DEV_ID_82576_FIBER:
 	case E1000_DEV_ID_82576_SERDES:
-	case E1000_DEV_ID_82576_QUAD_COPPER:
 		mac->type = e1000_82576;
 		break;
 	default:
-1
@@ -41,7 +41,6 @@ struct e1000_hw;
 #define E1000_DEV_ID_82576                    0x10C9
 #define E1000_DEV_ID_82576_FIBER              0x10E6
 #define E1000_DEV_ID_82576_SERDES             0x10E7
-#define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
 #define E1000_DEV_ID_82575EB_COPPER           0x10A7
 #define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
 #define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
+6 -11
@@ -373,13 +373,17 @@ static void igb_get_regs(struct net_device *netdev,
 	regs_buff[12] = rd32(E1000_EECD);
 
 	/* Interrupt */
-	regs_buff[13] = rd32(E1000_EICR);
+	/* Reading EICS for EICR because they read the
+	 * same but EICS does not clear on read */
+	regs_buff[13] = rd32(E1000_EICS);
 	regs_buff[14] = rd32(E1000_EICS);
 	regs_buff[15] = rd32(E1000_EIMS);
 	regs_buff[16] = rd32(E1000_EIMC);
 	regs_buff[17] = rd32(E1000_EIAC);
 	regs_buff[18] = rd32(E1000_EIAM);
-	regs_buff[19] = rd32(E1000_ICR);
+	/* Reading ICS for ICR because they read the
+	 * same but ICS does not clear on read */
+	regs_buff[19] = rd32(E1000_ICS);
 	regs_buff[20] = rd32(E1000_ICS);
 	regs_buff[21] = rd32(E1000_IMS);
 	regs_buff[22] = rd32(E1000_IMC);
@@ -1746,15 +1750,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 		/* return success for non excluded adapter ports */
 		retval = 0;
 		break;
-	case E1000_DEV_ID_82576_QUAD_COPPER:
-		/* quad port adapters only support WoL on port A */
-		if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
-			wol->supported = 0;
-			break;
-		}
-		/* return success for non excluded adapter ports */
-		retval = 0;
-		break;
 	default:
 		/* dual port cards only support WoL on port A from now on
 		 * unless it was enabled in the eeprom for port B
+11 -14
@@ -61,7 +61,6 @@ static struct pci_device_id igb_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -521,7 +520,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 				      adapter->msix_entries,
 				      numvecs);
 	if (err == 0)
-		return;
+		goto out;
 
 	igb_reset_interrupt_capability(adapter);
 
@@ -531,7 +530,7 @@ msi_only:
 	adapter->num_tx_queues = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
-
+out:
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
 	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
 	return;
@@ -1217,16 +1216,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
 			adapter->eeprom_wol = 0;
 		break;
-	case E1000_DEV_ID_82576_QUAD_COPPER:
-		/* if quad port adapter, disable WoL on all but port A */
-		if (global_quad_port_a != 0)
-			adapter->eeprom_wol = 0;
-		else
-			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
-		/* Reset for multiple quad port adapters */
-		if (++global_quad_port_a == 4)
-			global_quad_port_a = 0;
-		break;
 	}
 
 	/* initialize the wol settings based on the eeprom settings */
@@ -2290,7 +2279,9 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct igb_ring *tx_ring = adapter->tx_ring;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	u32 link;
+	u32 eics = 0;
 	s32 ret_val;
+	int i;
 
 	if ((netif_carrier_ok(netdev)) &&
 	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2392,7 +2383,13 @@ link_up:
 	}
 
 	/* Cause software interrupt to ensure rx ring is cleaned */
-	wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	if (adapter->msix_entries) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			eics |= adapter->rx_ring[i].eims_value;
+		wr32(E1000_EICS, eics);
+	} else {
+		wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	}
 
 	/* Force detection of hung controller every watchdog period */
 	tx_ring->detect_tx_hung = true;
+5 -3
@@ -1636,16 +1636,17 @@ static void ixgbe_set_multi(struct net_device *netdev)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct dev_mc_list *mc_ptr;
 	u8 *mta_list;
-	u32 fctrl;
+	u32 fctrl, vlnctrl;
 	int i;
 
 	/* Check for Promiscuous and All Multicast modes */
 
 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
 	if (netdev->flags & IFF_PROMISC) {
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-		fctrl &= ~IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
@@ -1653,10 +1654,11 @@ static void ixgbe_set_multi(struct net_device *netdev)
 		} else {
 			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		}
-		fctrl |= IXGBE_VLNCTRL_VFE;
+		vlnctrl |= IXGBE_VLNCTRL_VFE;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
 	if (netdev->mc_count) {
 		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
+20 -15
@@ -55,7 +55,7 @@
 #include <asm/system.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
-static char mv643xx_eth_driver_version[] = "1.2";
+static char mv643xx_eth_driver_version[] = "1.3";
 
 #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
 #define MV643XX_ETH_NAPI
@@ -474,11 +474,19 @@ static void rxq_refill(struct rx_queue *rxq)
 		/*
 		 * Reserve 2+14 bytes for an ethernet header (the
 		 * hardware automatically prepends 2 bytes of dummy
-		 * data to each received packet), 4 bytes for a VLAN
-		 * header, and 4 bytes for the trailing FCS -- 24
-		 * bytes total.
+		 * data to each received packet), 16 bytes for up to
+		 * four VLAN tags, and 4 bytes for the trailing FCS
+		 * -- 36 bytes total.
 		 */
-		skb_size = mp->dev->mtu + 24;
+		skb_size = mp->dev->mtu + 36;
+
+		/*
+		 * Make sure that the skb size is a multiple of 8
+		 * bytes, as the lower three bits of the receive
+		 * descriptor's buffer size field are ignored by
+		 * the hardware.
+		 */
+		skb_size = (skb_size + 7) & ~7;
 
 		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
 		if (skb == NULL)
@@ -509,10 +517,8 @@ static void rxq_refill(struct rx_queue *rxq)
 			skb_reserve(skb, 2);
 	}
 
-	if (rxq->rx_desc_count != rxq->rx_ring_size) {
-		rxq->rx_oom.expires = jiffies + (HZ / 10);
-		add_timer(&rxq->rx_oom);
-	}
+	if (rxq->rx_desc_count != rxq->rx_ring_size)
+		mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 }
@@ -529,7 +535,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 	int rx;
 
 	rx = 0;
-	while (rx < budget) {
+	while (rx < budget && rxq->rx_desc_count) {
 		struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
 		struct sk_buff *skb;
@@ -554,7 +560,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		spin_unlock_irqrestore(&mp->lock, flags);
 
 		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
-				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
+				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
@@ -636,9 +642,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 			txq_reclaim(mp->txq + i, 0);
 
 		if (netif_carrier_ok(mp->dev)) {
-			spin_lock(&mp->lock);
+			spin_lock_irq(&mp->lock);
 			__txq_maybe_wake(mp->txq + mp->txq_primary);
-			spin_unlock(&mp->lock);
+			spin_unlock_irq(&mp->lock);
 		}
 	}
 #endif
@@ -650,8 +656,6 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	if (rx < budget) {
 		netif_rx_complete(mp->dev, napi);
-		wrl(mp, INT_CAUSE(mp->port_num), 0);
-		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
 		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
 	}
 
@@ -1796,6 +1800,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	 */
 #ifdef MV643XX_ETH_NAPI
 	if (int_cause & INT_RX) {
+		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
 		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
 		rdl(mp, INT_MASK(mp->port_num));

Some files were not shown because too many files have changed in this diff.