You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Handle multicast packets properly in fast-RX path of mac80211, from
Johannes Berg.
2) Because of a logic bug, the user can't actually force SW
checksumming on r8152 devices. This makes diagnosis of hw
checksumming bugs really annoying. Fix from Hayes Wang.
3) VXLAN route lookup does not take the source and destination ports
into account, which means IPSEC policies cannot be matched properly.
Fix from Martynas Pumputis.
4) Do proper RCU locking in netvsc callbacks, from Stephen Hemminger.
5) Fix SKB leaks in mlxsw driver, from Arkadi Sharshevsky.
6) If lwtunnel_fill_encap() fails, we do not abort the netlink message
construction properly in fib_dump_info(), from David Ahern.
7) Do not use kernel stack for DMA buffers in atusb driver, from Stefan
Schmidt.
8) Openvswitch conntrack actions need to maintain a correct checksum,
fix from Lance Richardson.
9) ax25_disconnect() is missing a check for ax25->sk being NULL, in
fact it already checks this, but not in all of the necessary spots.
Fix from Basil Gunn.
10) Action GET operations in the packet scheduler can erroneously bump
the reference count of the entry, making it unreleasable. Fix from
Jamal Hadi Salim. Jamal gives a great set of example command lines
that trigger this in the commit message.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
net sched actions: fix refcnt when GETing of action after bind
net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV
net/mlx4_core: Fix when to save some qp context flags for dynamic VST to VGT transitions
net/mlx4_core: Fix racy CQ (Completion Queue) free
net: stmmac: don't use netdev_[dbg, info, ..] before net_device is registered
net/mlx5e: Fix a -Wmaybe-uninitialized warning
ax25: Fix segfault after sock connection timeout
bpf: rework prog_digest into prog_tag
tipc: allocate user memory with GFP_KERNEL flag
net: phy: dp83867: allow RGMII_TXID/RGMII_RXID interface types
ip6_tunnel: Account for tunnel header in tunnel MTU
mld: do not remove mld souce list info when set link down
be2net: fix MAC addr setting on privileged BE3 VFs
be2net: don't delete MAC on close on unprivileged BE3 VFs
be2net: fix status check in be_cmd_pmac_add()
cpmac: remove hopeless #warning
ravb: do not use zero-length alignment DMA descriptor
mlx4: do not call napi_schedule() without care
openvswitch: maintain correct checksum state in conntrack actions
tcp: fix tcp_fastopen unaligned access complaints on sparc
...
This commit is contained in:
@@ -3,9 +3,11 @@
|
||||
Required properties:
|
||||
- reg - The ID number for the phy, usually a small integer
|
||||
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
|
||||
for applicable values
|
||||
for applicable values. Required only if interface type is
|
||||
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
|
||||
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
|
||||
for applicable values
|
||||
for applicable values. Required only if interface type is
|
||||
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
|
||||
- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
|
||||
for applicable values
|
||||
|
||||
|
||||
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
|
||||
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
|
||||
unsigned int pkts_compl = 0, bytes_compl = 0;
|
||||
struct bcm_sysport_cb *cb;
|
||||
struct netdev_queue *txq;
|
||||
u32 hw_ind;
|
||||
|
||||
txq = netdev_get_tx_queue(ndev, ring->index);
|
||||
|
||||
/* Compute how many descriptors have been processed since last call */
|
||||
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
|
||||
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
|
||||
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
|
||||
|
||||
ring->c_index = c_index;
|
||||
|
||||
if (netif_tx_queue_stopped(txq) && pkts_compl)
|
||||
netif_tx_wake_queue(txq);
|
||||
|
||||
netif_dbg(priv, tx_done, ndev,
|
||||
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
|
||||
ring->index, ring->c_index, pkts_compl, bytes_compl);
|
||||
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
|
||||
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
|
||||
struct bcm_sysport_tx_ring *ring)
|
||||
{
|
||||
struct netdev_queue *txq;
|
||||
unsigned int released;
|
||||
unsigned long flags;
|
||||
|
||||
txq = netdev_get_tx_queue(priv->netdev, ring->index);
|
||||
|
||||
spin_lock_irqsave(&ring->lock, flags);
|
||||
released = __bcm_sysport_tx_reclaim(priv, ring);
|
||||
if (released)
|
||||
netif_tx_wake_queue(txq);
|
||||
|
||||
spin_unlock_irqrestore(&ring->lock, flags);
|
||||
|
||||
return released;
|
||||
}
|
||||
|
||||
/* Locked version of the per-ring TX reclaim, but does not wake the queue */
|
||||
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
|
||||
struct bcm_sysport_tx_ring *ring)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ring->lock, flags);
|
||||
__bcm_sysport_tx_reclaim(priv, ring);
|
||||
spin_unlock_irqrestore(&ring->lock, flags);
|
||||
}
|
||||
|
||||
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct bcm_sysport_tx_ring *ring =
|
||||
@@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
|
||||
napi_disable(&ring->napi);
|
||||
netif_napi_del(&ring->napi);
|
||||
|
||||
bcm_sysport_tx_reclaim(priv, ring);
|
||||
bcm_sysport_tx_clean(priv, ring);
|
||||
|
||||
kfree(ring->cbs);
|
||||
ring->cbs = NULL;
|
||||
|
||||
@@ -47,8 +47,9 @@ struct lmac {
|
||||
struct bgx {
|
||||
u8 bgx_id;
|
||||
struct lmac lmac[MAX_LMAC_PER_BGX];
|
||||
int lmac_count;
|
||||
u8 lmac_count;
|
||||
u8 max_lmac;
|
||||
u8 acpi_lmac_idx;
|
||||
void __iomem *reg_base;
|
||||
struct pci_dev *pdev;
|
||||
bool is_dlm;
|
||||
@@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
|
||||
if (acpi_bus_get_device(handle, &adev))
|
||||
goto out;
|
||||
|
||||
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
|
||||
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
|
||||
|
||||
SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
|
||||
SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
|
||||
|
||||
bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
|
||||
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
|
||||
bgx->acpi_lmac_idx++; /* move to next LMAC */
|
||||
out:
|
||||
bgx->lmac_count++;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
|
||||
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
|
||||
err:
|
||||
mutex_unlock(&adapter->mcc_lock);
|
||||
|
||||
if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
|
||||
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
|
||||
status = -EPERM;
|
||||
|
||||
return status;
|
||||
|
||||
@@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
|
||||
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
|
||||
return 0;
|
||||
|
||||
/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
|
||||
* address
|
||||
*/
|
||||
if (BEx_chip(adapter) && be_virtfn(adapter) &&
|
||||
!check_privilege(adapter, BE_PRIV_FILTMGMT))
|
||||
return -EPERM;
|
||||
|
||||
/* if device is not running, copy MAC to netdev->dev_addr */
|
||||
if (!netif_running(netdev))
|
||||
goto done;
|
||||
@@ -3609,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
|
||||
|
||||
static void be_disable_if_filters(struct be_adapter *adapter)
|
||||
{
|
||||
be_dev_mac_del(adapter, adapter->pmac_id[0]);
|
||||
/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
|
||||
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
|
||||
check_privilege(adapter, BE_PRIV_FILTMGMT))
|
||||
be_dev_mac_del(adapter, adapter->pmac_id[0]);
|
||||
|
||||
be_clear_uc_list(adapter);
|
||||
be_clear_mc_list(adapter);
|
||||
|
||||
@@ -3762,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
/* For BE3 VFs, the PF programs the initial MAC address */
|
||||
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
|
||||
/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
|
||||
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
|
||||
check_privilege(adapter, BE_PRIV_FILTMGMT)) {
|
||||
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
|
||||
{
|
||||
struct mlx4_cq *cq;
|
||||
|
||||
rcu_read_lock();
|
||||
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
|
||||
cqn & (dev->caps.num_cqs - 1));
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!cq) {
|
||||
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Acessing the CQ outside of rcu_read_lock is safe, because
|
||||
* the CQ is freed only after interrupt handling is completed.
|
||||
*/
|
||||
++cq->arm_sn;
|
||||
|
||||
cq->comp(cq);
|
||||
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
|
||||
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
|
||||
struct mlx4_cq *cq;
|
||||
|
||||
spin_lock(&cq_table->lock);
|
||||
|
||||
rcu_read_lock();
|
||||
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
|
||||
if (cq)
|
||||
atomic_inc(&cq->refcount);
|
||||
|
||||
spin_unlock(&cq_table->lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!cq) {
|
||||
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
|
||||
mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Acessing the CQ outside of rcu_read_lock is safe, because
|
||||
* the CQ is freed only after interrupt handling is completed.
|
||||
*/
|
||||
cq->event(cq, event_type);
|
||||
|
||||
if (atomic_dec_and_test(&cq->refcount))
|
||||
complete(&cq->free);
|
||||
}
|
||||
|
||||
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
spin_lock(&cq_table->lock);
|
||||
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
|
||||
spin_unlock_irq(&cq_table->lock);
|
||||
spin_unlock(&cq_table->lock);
|
||||
if (err)
|
||||
goto err_icm;
|
||||
|
||||
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
|
||||
return 0;
|
||||
|
||||
err_radix:
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
spin_lock(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, cq->cqn);
|
||||
spin_unlock_irq(&cq_table->lock);
|
||||
spin_unlock(&cq_table->lock);
|
||||
|
||||
err_icm:
|
||||
mlx4_cq_free_icm(dev, cq->cqn);
|
||||
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
|
||||
if (err)
|
||||
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
|
||||
|
||||
spin_lock(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, cq->cqn);
|
||||
spin_unlock(&cq_table->lock);
|
||||
|
||||
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
|
||||
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
|
||||
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
|
||||
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
|
||||
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, cq->cqn);
|
||||
spin_unlock_irq(&cq_table->lock);
|
||||
|
||||
if (atomic_dec_and_test(&cq->refcount))
|
||||
complete(&cq->free);
|
||||
wait_for_completion(&cq->free);
|
||||
|
||||
@@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
|
||||
/* Process all completions if exist to prevent
|
||||
* the queues freezing if they are full
|
||||
*/
|
||||
for (i = 0; i < priv->rx_ring_num; i++)
|
||||
for (i = 0; i < priv->rx_ring_num; i++) {
|
||||
local_bh_disable();
|
||||
napi_schedule(&priv->rx_cq[i]->napi);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
netif_tx_start_all_queues(dev);
|
||||
netif_device_attach(dev);
|
||||
|
||||
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
|
||||
break;
|
||||
|
||||
case MLX4_EVENT_TYPE_SRQ_LIMIT:
|
||||
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
|
||||
__func__);
|
||||
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
|
||||
__func__, be32_to_cpu(eqe->event.srq.srqn),
|
||||
eq->eqn);
|
||||
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
|
||||
if (mlx4_is_master(dev)) {
|
||||
/* forward only to slave owning the SRQ */
|
||||
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
|
||||
eq->eqn, eq->cons_index, ret);
|
||||
break;
|
||||
}
|
||||
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
|
||||
__func__, slave,
|
||||
be32_to_cpu(eqe->event.srq.srqn),
|
||||
eqe->type, eqe->subtype);
|
||||
if (eqe->type ==
|
||||
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
|
||||
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
|
||||
__func__, slave,
|
||||
be32_to_cpu(eqe->event.srq.srqn),
|
||||
eqe->type, eqe->subtype);
|
||||
|
||||
if (!ret && slave != dev->caps.function) {
|
||||
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
|
||||
__func__, eqe->type,
|
||||
eqe->subtype, slave);
|
||||
if (eqe->type ==
|
||||
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
|
||||
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
|
||||
__func__, eqe->type,
|
||||
eqe->subtype, slave);
|
||||
mlx4_slave_event(dev, slave, eqe);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
|
||||
put_res(dev, slave, srqn, RES_SRQ);
|
||||
qp->srq = srq;
|
||||
}
|
||||
|
||||
/* Save param3 for dynamic changes from VST back to VGT */
|
||||
qp->param3 = qpc->param3;
|
||||
put_res(dev, slave, rcqn, RES_CQ);
|
||||
put_res(dev, slave, mtt_base, RES_MTT);
|
||||
res_end_move(dev, slave, RES_QP, qpn);
|
||||
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
|
||||
int qpn = vhcr->in_modifier & 0x7fffff;
|
||||
struct res_qp *qp;
|
||||
u8 orig_sched_queue;
|
||||
__be32 orig_param3 = qpc->param3;
|
||||
u8 orig_vlan_control = qpc->pri_path.vlan_control;
|
||||
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
|
||||
u8 orig_pri_path_fl = qpc->pri_path.fl;
|
||||
@@ -3814,7 +3816,6 @@ out:
|
||||
*/
|
||||
if (!err) {
|
||||
qp->sched_queue = orig_sched_queue;
|
||||
qp->param3 = orig_param3;
|
||||
qp->vlan_control = orig_vlan_control;
|
||||
qp->fvl_rx = orig_fvl_rx;
|
||||
qp->pri_path_fl = orig_pri_path_fl;
|
||||
|
||||
@@ -668,9 +668,12 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
|
||||
int ttl;
|
||||
|
||||
#if IS_ENABLED(CONFIG_INET)
|
||||
int ret;
|
||||
|
||||
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
|
||||
if (IS_ERR(rt))
|
||||
return PTR_ERR(rt);
|
||||
ret = PTR_ERR_OR_ZERO(rt);
|
||||
if (ret)
|
||||
return ret;
|
||||
#else
|
||||
return -EOPNOTSUPP;
|
||||
#endif
|
||||
@@ -741,8 +744,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
|
||||
struct flowi4 fl4 = {};
|
||||
char *encap_header;
|
||||
int encap_size;
|
||||
__be32 saddr = 0;
|
||||
int ttl = 0;
|
||||
__be32 saddr;
|
||||
int ttl;
|
||||
int err;
|
||||
|
||||
encap_header = kzalloc(max_encap_size, GFP_KERNEL);
|
||||
|
||||
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
|
||||
/* pci_eqe_cmd_token
|
||||
* Command completion event - token
|
||||
*/
|
||||
MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
|
||||
MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
|
||||
|
||||
/* pci_eqe_cmd_status
|
||||
* Command completion event - status
|
||||
*/
|
||||
MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
|
||||
MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
|
||||
|
||||
/* pci_eqe_cmd_out_param_h
|
||||
* Command completion event - output parameter - higher part
|
||||
*/
|
||||
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
|
||||
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
|
||||
|
||||
/* pci_eqe_cmd_out_param_l
|
||||
* Command completion event - output parameter - lower part
|
||||
*/
|
||||
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
|
||||
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
|
||||
dev_kfree_skb_any(skb_orig);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
dev_consume_skb_any(skb_orig);
|
||||
}
|
||||
|
||||
if (eth_skb_pad(skb)) {
|
||||
|
||||
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
|
||||
dev_kfree_skb_any(skb_orig);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
dev_consume_skb_any(skb_orig);
|
||||
}
|
||||
mlxsw_sx_txhdr_construct(skb, &tx_info);
|
||||
/* TX header is consumed by HW on the way so we shouldn't count its
|
||||
|
||||
@@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
|
||||
else
|
||||
adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
|
||||
|
||||
/* of_phy_find_device() claims a reference to the phydev,
|
||||
* so we do that here manually as well. When the driver
|
||||
* later unloads, it can unilaterally drop the reference
|
||||
* without worrying about ACPI vs DT.
|
||||
*/
|
||||
if (adpt->phydev)
|
||||
get_device(&adpt->phydev->mdio.dev);
|
||||
} else {
|
||||
struct device_node *phy_np;
|
||||
|
||||
|
||||
@@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
|
||||
err_undo_napi:
|
||||
netif_napi_del(&adpt->rx_q.napi);
|
||||
err_undo_mdiobus:
|
||||
if (!has_acpi_companion(&pdev->dev))
|
||||
put_device(&adpt->phydev->mdio.dev);
|
||||
put_device(&adpt->phydev->mdio.dev);
|
||||
mdiobus_unregister(adpt->mii_bus);
|
||||
err_undo_clocks:
|
||||
emac_clks_teardown(adpt);
|
||||
@@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
|
||||
|
||||
emac_clks_teardown(adpt);
|
||||
|
||||
if (!has_acpi_companion(&pdev->dev))
|
||||
put_device(&adpt->phydev->mdio.dev);
|
||||
put_device(&adpt->phydev->mdio.dev);
|
||||
mdiobus_unregister(adpt->mii_bus);
|
||||
free_netdev(netdev);
|
||||
|
||||
|
||||
@@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
|
||||
/* Receive error message handling */
|
||||
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
|
||||
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
|
||||
if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
|
||||
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
|
||||
ndev->stats.rx_over_errors = priv->rx_over_errors;
|
||||
netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
|
||||
}
|
||||
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
|
||||
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
|
||||
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
|
||||
netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
|
||||
}
|
||||
out:
|
||||
return budget - quota;
|
||||
}
|
||||
@@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
|
||||
entry / NUM_TX_DESC * DPTR_ALIGN;
|
||||
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
|
||||
/* Zero length DMA descriptors are problematic as they seem to
|
||||
* terminate DMA transfers. Avoid them by simply using a length of
|
||||
* DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
|
||||
*
|
||||
* As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
|
||||
* data by the call to skb_put_padto() above this is safe with
|
||||
* respect to both the length of the first DMA descriptor (len)
|
||||
* overflowing the available data and the length of the second DMA
|
||||
* descriptor (skb->len - len) being negative.
|
||||
*/
|
||||
if (len == 0)
|
||||
len = DPTR_ALIGN;
|
||||
|
||||
memcpy(buffer, skb->data, len);
|
||||
dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(ndev->dev.parent, dma_addr))
|
||||
|
||||
@@ -3326,9 +3326,9 @@ int stmmac_dvr_probe(struct device *device,
|
||||
(priv->plat->maxmtu >= ndev->min_mtu))
|
||||
ndev->max_mtu = priv->plat->maxmtu;
|
||||
else if (priv->plat->maxmtu < ndev->min_mtu)
|
||||
netdev_warn(priv->dev,
|
||||
"%s: warning: maxmtu having invalid value (%d)\n",
|
||||
__func__, priv->plat->maxmtu);
|
||||
dev_warn(priv->device,
|
||||
"%s: warning: maxmtu having invalid value (%d)\n",
|
||||
__func__, priv->plat->maxmtu);
|
||||
|
||||
if (flow_ctrl)
|
||||
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
|
||||
@@ -3340,7 +3340,8 @@ int stmmac_dvr_probe(struct device *device,
|
||||
*/
|
||||
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
|
||||
priv->use_riwt = 1;
|
||||
netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
|
||||
dev_info(priv->device,
|
||||
"Enable RX Mitigation via HW Watchdog Timer\n");
|
||||
}
|
||||
|
||||
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
|
||||
@@ -3366,17 +3367,17 @@ int stmmac_dvr_probe(struct device *device,
|
||||
/* MDIO bus Registration */
|
||||
ret = stmmac_mdio_register(ndev);
|
||||
if (ret < 0) {
|
||||
netdev_err(priv->dev,
|
||||
"%s: MDIO bus (id: %d) registration failed",
|
||||
__func__, priv->plat->bus_id);
|
||||
dev_err(priv->device,
|
||||
"%s: MDIO bus (id: %d) registration failed",
|
||||
__func__, priv->plat->bus_id);
|
||||
goto error_mdio_register;
|
||||
}
|
||||
}
|
||||
|
||||
ret = register_netdev(ndev);
|
||||
if (ret) {
|
||||
netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
|
||||
__func__, ret);
|
||||
dev_err(priv->device, "%s: ERROR %i registering the device\n",
|
||||
__func__, ret);
|
||||
goto error_netdev_register;
|
||||
}
|
||||
|
||||
|
||||
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
|
||||
goto fail_alloc;
|
||||
}
|
||||
|
||||
#warning FIXME: unhardcode gpio&reset bits
|
||||
/* FIXME: unhardcode gpio&reset bits */
|
||||
ar7_gpio_disable(26);
|
||||
ar7_gpio_disable(27);
|
||||
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
|
||||
|
||||
@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
|
||||
* policy filters on the host). Deliver these via the VF
|
||||
* interface in the guest.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
|
||||
if (vf_netdev && (vf_netdev->flags & IFF_UP))
|
||||
net = vf_netdev;
|
||||
@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
|
||||
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
|
||||
if (unlikely(!skb)) {
|
||||
++net->stats.rx_dropped;
|
||||
rcu_read_unlock();
|
||||
return NVSP_STAT_FAIL;
|
||||
}
|
||||
|
||||
@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
|
||||
* TODO - use NAPI?
|
||||
*/
|
||||
netif_rx(skb);
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
|
||||
/* Reset */
|
||||
if (gpio_is_valid(rstn)) {
|
||||
udelay(1);
|
||||
gpio_set_value(rstn, 0);
|
||||
gpio_set_value_cansleep(rstn, 0);
|
||||
udelay(1);
|
||||
gpio_set_value(rstn, 1);
|
||||
gpio_set_value_cansleep(rstn, 1);
|
||||
usleep_range(120, 240);
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user