You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
net: Remove unused netdev arg from some NAPI interfaces.
When the napi api was changed to separate its 1:1 binding to the net_device struct, the netif_rx_[prep|schedule|complete] api failed to remove the now vestigial net_device structure parameter. This patch cleans up that api by properly removing it. Signed-off-by: Neil Horman <nhorman@tuxdriver.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
889bd9b6db
commit
908a7a16b8
@@ -2541,7 +2541,7 @@ static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic
|
||||
{
|
||||
struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
|
||||
|
||||
netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi);
|
||||
netif_rx_schedule(&nesvnic->napi);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ static int nes_netdev_poll(struct napi_struct *napi, int budget)
|
||||
nes_nic_ce_handler(nesdev, nescq);
|
||||
|
||||
if (nescq->cqes_pending == 0) {
|
||||
netif_rx_complete(netdev, napi);
|
||||
netif_rx_complete(napi);
|
||||
/* clear out completed cqes and arm */
|
||||
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
|
||||
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
|
||||
|
||||
@@ -446,11 +446,11 @@ poll_more:
|
||||
if (dev->features & NETIF_F_LRO)
|
||||
lro_flush_all(&priv->lro.lro_mgr);
|
||||
|
||||
netif_rx_complete(dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
if (unlikely(ib_req_notify_cq(priv->recv_cq,
|
||||
IB_CQ_NEXT_COMP |
|
||||
IB_CQ_REPORT_MISSED_EVENTS)) &&
|
||||
netif_rx_reschedule(dev, napi))
|
||||
netif_rx_reschedule(napi))
|
||||
goto poll_more;
|
||||
}
|
||||
|
||||
@@ -462,7 +462,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
|
||||
struct net_device *dev = dev_ptr;
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
|
||||
netif_rx_schedule(dev, &priv->napi);
|
||||
netif_rx_schedule(&priv->napi);
|
||||
}
|
||||
|
||||
static void drain_tx_cq(struct net_device *dev)
|
||||
|
||||
@@ -604,7 +604,7 @@ rx_next:
|
||||
|
||||
spin_lock_irqsave(&cp->lock, flags);
|
||||
cpw16_f(IntrMask, cp_intr_mask);
|
||||
__netif_rx_complete(dev, napi);
|
||||
__netif_rx_complete(napi);
|
||||
spin_unlock_irqrestore(&cp->lock, flags);
|
||||
}
|
||||
|
||||
@@ -641,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
|
||||
}
|
||||
|
||||
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
|
||||
if (netif_rx_schedule_prep(dev, &cp->napi)) {
|
||||
if (netif_rx_schedule_prep(&cp->napi)) {
|
||||
cpw16_f(IntrMask, cp_norx_intr_mask);
|
||||
__netif_rx_schedule(dev, &cp->napi);
|
||||
__netif_rx_schedule(&cp->napi);
|
||||
}
|
||||
|
||||
if (status & (TxOK | TxErr | TxEmpty | SWInt))
|
||||
|
||||
@@ -2128,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
|
||||
*/
|
||||
spin_lock_irqsave(&tp->lock, flags);
|
||||
RTL_W16_F(IntrMask, rtl8139_intr_mask);
|
||||
__netif_rx_complete(dev, napi);
|
||||
__netif_rx_complete(napi);
|
||||
spin_unlock_irqrestore(&tp->lock, flags);
|
||||
}
|
||||
spin_unlock(&tp->rx_lock);
|
||||
@@ -2178,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
|
||||
/* Receive packets are processed by poll routine.
|
||||
If not running start it now. */
|
||||
if (status & RxAckBits){
|
||||
if (netif_rx_schedule_prep(dev, &tp->napi)) {
|
||||
if (netif_rx_schedule_prep(&tp->napi)) {
|
||||
RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
|
||||
__netif_rx_schedule(dev, &tp->napi);
|
||||
__netif_rx_schedule(&tp->napi);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -831,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
|
||||
if (rx_pkt_limit > 0) {
|
||||
/* Receive descriptor is empty now */
|
||||
spin_lock_irqsave(&lp->lock, flags);
|
||||
__netif_rx_complete(dev, napi);
|
||||
__netif_rx_complete(napi);
|
||||
writel(VAL0|RINTEN0, mmio + INTEN0);
|
||||
writel(VAL2 | RDMD0, mmio + CMD0);
|
||||
spin_unlock_irqrestore(&lp->lock, flags);
|
||||
@@ -1170,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
|
||||
|
||||
/* Check if Receive Interrupt has occurred. */
|
||||
if (intr0 & RINT0) {
|
||||
if (netif_rx_schedule_prep(dev, &lp->napi)) {
|
||||
if (netif_rx_schedule_prep(&lp->napi)) {
|
||||
/* Disable receive interupts */
|
||||
writel(RINTEN0, mmio + INTEN0);
|
||||
/* Schedule a polling routine */
|
||||
__netif_rx_schedule(dev, &lp->napi);
|
||||
__netif_rx_schedule(&lp->napi);
|
||||
} else if (intren0 & RINTEN0) {
|
||||
printk("************Driver bug! \
|
||||
interrupt while in poll\n");
|
||||
|
||||
@@ -298,7 +298,7 @@ poll_some_more:
|
||||
int more = 0;
|
||||
|
||||
spin_lock_irq(&ep->rx_lock);
|
||||
__netif_rx_complete(dev, napi);
|
||||
__netif_rx_complete(napi);
|
||||
wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
|
||||
if (ep93xx_have_more_rx(ep)) {
|
||||
wrl(ep, REG_INTEN, REG_INTEN_TX);
|
||||
@@ -415,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
|
||||
|
||||
if (status & REG_INTSTS_RX) {
|
||||
spin_lock(&ep->rx_lock);
|
||||
if (likely(netif_rx_schedule_prep(dev, &ep->napi))) {
|
||||
if (likely(netif_rx_schedule_prep(&ep->napi))) {
|
||||
wrl(ep, REG_INTEN, REG_INTEN_TX);
|
||||
__netif_rx_schedule(dev, &ep->napi);
|
||||
__netif_rx_schedule(&ep->napi);
|
||||
}
|
||||
spin_unlock(&ep->rx_lock);
|
||||
}
|
||||
|
||||
@@ -498,7 +498,7 @@ static void eth_rx_irq(void *pdev)
|
||||
printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
|
||||
#endif
|
||||
qmgr_disable_irq(port->plat->rxq);
|
||||
netif_rx_schedule(dev, &port->napi);
|
||||
netif_rx_schedule(&port->napi);
|
||||
}
|
||||
|
||||
static int eth_poll(struct napi_struct *napi, int budget)
|
||||
@@ -526,7 +526,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
|
||||
printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
|
||||
dev->name);
|
||||
#endif
|
||||
netif_rx_complete(dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
qmgr_enable_irq(rxq);
|
||||
if (!qmgr_stat_empty(rxq) &&
|
||||
netif_rx_reschedule(dev, napi)) {
|
||||
@@ -1025,7 +1025,7 @@ static int eth_open(struct net_device *dev)
|
||||
}
|
||||
ports_open++;
|
||||
/* we may already have RX data, enables IRQ */
|
||||
netif_rx_schedule(dev, &port->napi);
|
||||
netif_rx_schedule(&port->napi);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
|
||||
AT_WRITE_REG(hw, REG_IMR,
|
||||
IMR_NORMAL_MASK & ~ISR_RX_EVENT);
|
||||
AT_WRITE_FLUSH(hw);
|
||||
if (likely(netif_rx_schedule_prep(netdev,
|
||||
if (likely(netif_rx_schedule_prep(
|
||||
&adapter->napi)))
|
||||
__netif_rx_schedule(netdev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
}
|
||||
} while (--max_ints > 0);
|
||||
/* re-enable Interrupt*/
|
||||
@@ -1515,7 +1515,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
|
||||
/* If no Tx and not enough Rx work done, exit the polling mode */
|
||||
if (work_done < budget) {
|
||||
quit_polling:
|
||||
netif_rx_complete(netdev, napi);
|
||||
netif_rx_complete(napi);
|
||||
imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
|
||||
AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
|
||||
/* test debug */
|
||||
|
||||
+3
-3
@@ -875,7 +875,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
|
||||
}
|
||||
|
||||
if (work_done < budget) {
|
||||
netif_rx_complete(netdev, napi);
|
||||
netif_rx_complete(napi);
|
||||
b44_enable_ints(bp);
|
||||
}
|
||||
|
||||
@@ -907,13 +907,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
|
||||
goto irq_ack;
|
||||
}
|
||||
|
||||
if (netif_rx_schedule_prep(dev, &bp->napi)) {
|
||||
if (netif_rx_schedule_prep(&bp->napi)) {
|
||||
/* NOTE: These writes are posted by the readback of
|
||||
* the ISTAT register below.
|
||||
*/
|
||||
bp->istat = istat;
|
||||
__b44_disable_ints(bp);
|
||||
__netif_rx_schedule(dev, &bp->napi);
|
||||
__netif_rx_schedule(&bp->napi);
|
||||
} else {
|
||||
printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
|
||||
dev->name);
|
||||
|
||||
+6
-9
@@ -3043,7 +3043,6 @@ bnx2_msi(int irq, void *dev_instance)
|
||||
{
|
||||
struct bnx2_napi *bnapi = dev_instance;
|
||||
struct bnx2 *bp = bnapi->bp;
|
||||
struct net_device *dev = bp->dev;
|
||||
|
||||
prefetch(bnapi->status_blk.msi);
|
||||
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
|
||||
@@ -3054,7 +3053,7 @@ bnx2_msi(int irq, void *dev_instance)
|
||||
if (unlikely(atomic_read(&bp->intr_sem) != 0))
|
||||
return IRQ_HANDLED;
|
||||
|
||||
netif_rx_schedule(dev, &bnapi->napi);
|
||||
netif_rx_schedule(&bnapi->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -3064,7 +3063,6 @@ bnx2_msi_1shot(int irq, void *dev_instance)
|
||||
{
|
||||
struct bnx2_napi *bnapi = dev_instance;
|
||||
struct bnx2 *bp = bnapi->bp;
|
||||
struct net_device *dev = bp->dev;
|
||||
|
||||
prefetch(bnapi->status_blk.msi);
|
||||
|
||||
@@ -3072,7 +3070,7 @@ bnx2_msi_1shot(int irq, void *dev_instance)
|
||||
if (unlikely(atomic_read(&bp->intr_sem) != 0))
|
||||
return IRQ_HANDLED;
|
||||
|
||||
netif_rx_schedule(dev, &bnapi->napi);
|
||||
netif_rx_schedule(&bnapi->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -3082,7 +3080,6 @@ bnx2_interrupt(int irq, void *dev_instance)
|
||||
{
|
||||
struct bnx2_napi *bnapi = dev_instance;
|
||||
struct bnx2 *bp = bnapi->bp;
|
||||
struct net_device *dev = bp->dev;
|
||||
struct status_block *sblk = bnapi->status_blk.msi;
|
||||
|
||||
/* When using INTx, it is possible for the interrupt to arrive
|
||||
@@ -3109,9 +3106,9 @@ bnx2_interrupt(int irq, void *dev_instance)
|
||||
if (unlikely(atomic_read(&bp->intr_sem) != 0))
|
||||
return IRQ_HANDLED;
|
||||
|
||||
if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
|
||||
if (netif_rx_schedule_prep(&bnapi->napi)) {
|
||||
bnapi->last_status_idx = sblk->status_idx;
|
||||
__netif_rx_schedule(dev, &bnapi->napi);
|
||||
__netif_rx_schedule(&bnapi->napi);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -3221,7 +3218,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
|
||||
rmb();
|
||||
if (likely(!bnx2_has_fast_work(bnapi))) {
|
||||
|
||||
netif_rx_complete(bp->dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
|
||||
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
|
||||
bnapi->last_status_idx);
|
||||
@@ -3254,7 +3251,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
rmb();
|
||||
if (likely(!bnx2_has_work(bnapi))) {
|
||||
netif_rx_complete(bp->dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
|
||||
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
|
||||
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
|
||||
|
||||
@@ -1615,7 +1615,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
|
||||
prefetch(&fp->status_blk->c_status_block.status_block_index);
|
||||
prefetch(&fp->status_blk->u_status_block.status_block_index);
|
||||
|
||||
netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
|
||||
netif_rx_schedule(&bnx2x_fp(bp, index, napi));
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -1654,7 +1654,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
|
||||
prefetch(&fp->status_blk->c_status_block.status_block_index);
|
||||
prefetch(&fp->status_blk->u_status_block.status_block_index);
|
||||
|
||||
netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
|
||||
netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
|
||||
|
||||
status &= ~mask;
|
||||
}
|
||||
@@ -9284,7 +9284,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
|
||||
#ifdef BNX2X_STOP_ON_ERROR
|
||||
poll_panic:
|
||||
#endif
|
||||
netif_rx_complete(bp->dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
|
||||
bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
|
||||
le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
|
||||
|
||||
@@ -2506,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
|
||||
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
|
||||
#ifdef USE_NAPI
|
||||
cas_mask_intr(cp);
|
||||
netif_rx_schedule(dev, &cp->napi);
|
||||
netif_rx_schedule(&cp->napi);
|
||||
#else
|
||||
cas_rx_ringN(cp, ring, 0);
|
||||
#endif
|
||||
@@ -2557,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id)
|
||||
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
|
||||
#ifdef USE_NAPI
|
||||
cas_mask_intr(cp);
|
||||
netif_rx_schedule(dev, &cp->napi);
|
||||
netif_rx_schedule(&cp->napi);
|
||||
#else
|
||||
cas_rx_ringN(cp, 1, 0);
|
||||
#endif
|
||||
@@ -2613,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id)
|
||||
if (status & INTR_RX_DONE) {
|
||||
#ifdef USE_NAPI
|
||||
cas_mask_intr(cp);
|
||||
netif_rx_schedule(dev, &cp->napi);
|
||||
netif_rx_schedule(&cp->napi);
|
||||
#else
|
||||
cas_rx_ringN(cp, 0, 0);
|
||||
#endif
|
||||
@@ -2691,7 +2691,7 @@ rx_comp:
|
||||
#endif
|
||||
spin_unlock_irqrestore(&cp->lock, flags);
|
||||
if (enable_intr) {
|
||||
netif_rx_complete(dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
cas_unmask_intr(cp);
|
||||
}
|
||||
return credits;
|
||||
|
||||
@@ -1613,7 +1613,7 @@ int t1_poll(struct napi_struct *napi, int budget)
|
||||
int work_done = process_responses(adapter, budget);
|
||||
|
||||
if (likely(work_done < budget)) {
|
||||
netif_rx_complete(dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
writel(adapter->sge->respQ.cidx,
|
||||
adapter->regs + A_SG_SLEEPING);
|
||||
}
|
||||
@@ -1633,7 +1633,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
|
||||
|
||||
if (napi_schedule_prep(&adapter->napi)) {
|
||||
if (process_pure_responses(adapter))
|
||||
__netif_rx_schedule(dev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
else {
|
||||
/* no data, no NAPI needed */
|
||||
writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
|
||||
|
||||
+5
-5
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
|
||||
printk(KERN_WARNING "%s: rx: polling, but no queue\n",
|
||||
priv->dev->name);
|
||||
spin_unlock(&priv->rx_lock);
|
||||
netif_rx_complete(priv->dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
|
||||
if (processed == 0) {
|
||||
/* we ran out of packets to read,
|
||||
* revert to interrupt-driven mode */
|
||||
netif_rx_complete(priv->dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
|
||||
return 0;
|
||||
}
|
||||
@@ -536,7 +536,7 @@ fatal_error:
|
||||
}
|
||||
|
||||
spin_unlock(&priv->rx_lock);
|
||||
netif_rx_complete(priv->dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
netif_tx_stop_all_queues(priv->dev);
|
||||
napi_disable(&priv->napi);
|
||||
|
||||
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
|
||||
|
||||
if (status & MAC_INT_RX) {
|
||||
queue = (status >> 8) & 7;
|
||||
if (netif_rx_schedule_prep(dev, &priv->napi)) {
|
||||
if (netif_rx_schedule_prep(&priv->napi)) {
|
||||
cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
|
||||
__netif_rx_schedule(dev, &priv->napi);
|
||||
__netif_rx_schedule(&priv->napi);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
+3
-4
@@ -2049,9 +2049,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
|
||||
if(stat_ack & stat_ack_rnr)
|
||||
nic->ru_running = RU_SUSPENDED;
|
||||
|
||||
if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
|
||||
if(likely(netif_rx_schedule_prep(&nic->napi))) {
|
||||
e100_disable_irq(nic);
|
||||
__netif_rx_schedule(netdev, &nic->napi);
|
||||
__netif_rx_schedule(&nic->napi);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -2060,7 +2060,6 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
|
||||
static int e100_poll(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct nic *nic = container_of(napi, struct nic, napi);
|
||||
struct net_device *netdev = nic->netdev;
|
||||
unsigned int work_done = 0;
|
||||
|
||||
e100_rx_clean(nic, &work_done, budget);
|
||||
@@ -2068,7 +2067,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
/* If budget not fully consumed, exit the polling mode */
|
||||
if (work_done < budget) {
|
||||
netif_rx_complete(netdev, napi);
|
||||
netif_rx_complete(napi);
|
||||
e100_enable_irq(nic);
|
||||
}
|
||||
|
||||
|
||||
@@ -3687,12 +3687,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
|
||||
mod_timer(&adapter->watchdog_timer, jiffies + 1);
|
||||
}
|
||||
|
||||
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
|
||||
if (likely(netif_rx_schedule_prep(&adapter->napi))) {
|
||||
adapter->total_tx_bytes = 0;
|
||||
adapter->total_tx_packets = 0;
|
||||
adapter->total_rx_bytes = 0;
|
||||
adapter->total_rx_packets = 0;
|
||||
__netif_rx_schedule(netdev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
} else
|
||||
e1000_irq_enable(adapter);
|
||||
|
||||
@@ -3747,12 +3747,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
|
||||
ew32(IMC, ~0);
|
||||
E1000_WRITE_FLUSH();
|
||||
}
|
||||
if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
|
||||
if (likely(netif_rx_schedule_prep(&adapter->napi))) {
|
||||
adapter->total_tx_bytes = 0;
|
||||
adapter->total_tx_packets = 0;
|
||||
adapter->total_rx_bytes = 0;
|
||||
adapter->total_rx_packets = 0;
|
||||
__netif_rx_schedule(netdev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
} else
|
||||
/* this really should not happen! if it does it is basically a
|
||||
* bug, but not a hard error, so enable ints and continue */
|
||||
@@ -3793,7 +3793,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
|
||||
if (work_done < budget) {
|
||||
if (likely(adapter->itr_setting & 3))
|
||||
e1000_set_itr(adapter);
|
||||
netif_rx_complete(poll_dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
e1000_irq_enable(adapter);
|
||||
}
|
||||
|
||||
|
||||
@@ -1179,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
|
||||
mod_timer(&adapter->watchdog_timer, jiffies + 1);
|
||||
}
|
||||
|
||||
if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
|
||||
if (netif_rx_schedule_prep(&adapter->napi)) {
|
||||
adapter->total_tx_bytes = 0;
|
||||
adapter->total_tx_packets = 0;
|
||||
adapter->total_rx_bytes = 0;
|
||||
adapter->total_rx_packets = 0;
|
||||
__netif_rx_schedule(netdev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -1246,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
|
||||
mod_timer(&adapter->watchdog_timer, jiffies + 1);
|
||||
}
|
||||
|
||||
if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
|
||||
if (netif_rx_schedule_prep(&adapter->napi)) {
|
||||
adapter->total_tx_bytes = 0;
|
||||
adapter->total_tx_packets = 0;
|
||||
adapter->total_rx_bytes = 0;
|
||||
adapter->total_rx_packets = 0;
|
||||
__netif_rx_schedule(netdev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -1320,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
|
||||
adapter->rx_ring->set_itr = 0;
|
||||
}
|
||||
|
||||
if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
|
||||
if (netif_rx_schedule_prep(&adapter->napi)) {
|
||||
adapter->total_rx_bytes = 0;
|
||||
adapter->total_rx_packets = 0;
|
||||
__netif_rx_schedule(netdev, &adapter->napi);
|
||||
__netif_rx_schedule(&adapter->napi);
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -2028,7 +2028,7 @@ clean_rx:
|
||||
if (work_done < budget) {
|
||||
if (adapter->itr_setting & 3)
|
||||
e1000_set_itr(adapter);
|
||||
netif_rx_complete(poll_dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
if (adapter->msix_entries)
|
||||
ew32(IMS, adapter->rx_ring->ims_val);
|
||||
else
|
||||
|
||||
@@ -830,7 +830,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
|
||||
while ((rx != budget) || force_irq) {
|
||||
pr->poll_counter = 0;
|
||||
force_irq = 0;
|
||||
netif_rx_complete(dev, napi);
|
||||
netif_rx_complete(napi);
|
||||
ehea_reset_cq_ep(pr->recv_cq);
|
||||
ehea_reset_cq_ep(pr->send_cq);
|
||||
ehea_reset_cq_n1(pr->recv_cq);
|
||||
@@ -859,7 +859,7 @@ static void ehea_netpoll(struct net_device *dev)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < port->num_def_qps; i++)
|
||||
netif_rx_schedule(dev, &port->port_res[i].napi);
|
||||
netif_rx_schedule(&port->port_res[i].napi);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -867,7 +867,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
|
||||
{
|
||||
struct ehea_port_res *pr = param;
|
||||
|
||||
netif_rx_schedule(pr->port->netdev, &pr->napi);
|
||||
netif_rx_schedule(&pr->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@@ -411,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
|
||||
}
|
||||
|
||||
if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
|
||||
if (netif_rx_schedule_prep(netdev, &enic->napi))
|
||||
__netif_rx_schedule(netdev, &enic->napi);
|
||||
if (netif_rx_schedule_prep(&enic->napi))
|
||||
__netif_rx_schedule(&enic->napi);
|
||||
} else {
|
||||
vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
|
||||
}
|
||||
@@ -440,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
|
||||
* writes).
|
||||
*/
|
||||
|
||||
netif_rx_schedule(enic->netdev, &enic->napi);
|
||||
netif_rx_schedule(&enic->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -450,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
|
||||
struct enic *enic = data;
|
||||
|
||||
/* schedule NAPI polling for RQ cleanup */
|
||||
netif_rx_schedule(enic->netdev, &enic->napi);
|
||||
netif_rx_schedule(&enic->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -1068,7 +1068,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
|
||||
if (netdev->features & NETIF_F_LRO)
|
||||
lro_flush_all(&enic->lro_mgr);
|
||||
|
||||
netif_rx_complete(netdev, napi);
|
||||
netif_rx_complete(napi);
|
||||
vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
|
||||
}
|
||||
|
||||
@@ -1112,7 +1112,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
|
||||
if (netdev->features & NETIF_F_LRO)
|
||||
lro_flush_all(&enic->lro_mgr);
|
||||
|
||||
netif_rx_complete(netdev, napi);
|
||||
netif_rx_complete(napi);
|
||||
vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user