Merge tag 'net-5.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Including fixes from ipsec.
Current release - regressions:
- sched: taprio: fix init procedure to avoid inf loop when dumping
- sctp: move the active_key update after sh_keys is added
Current release - new code bugs:
- sparx5: fix build with old GCC & bitmask on 32-bit targets
Previous releases - regressions:
- xfrm: redo the PREEMPT_RT RCU vs hash_resize_mutex deadlock fix
- xfrm: fixes for the compat netlink attribute translator
- phy: micrel: Fix detection of ksz87xx switch
Previous releases - always broken:
- gro: set inner transport header offset in tcp/udp GRO hook to avoid
crashes when such packets reach GSO
- vsock: handle VIRTIO_VSOCK_OP_CREDIT_REQUEST, as required by spec
- dsa: sja1105: fix static FDB entries on SJA1105P/Q/R/S and SJA1110
- bridge: validate the NUD_PERMANENT bit when adding an extern_learn
FDB entry
- usb: lan78xx: don't modify phy_device state concurrently
- usb: pegasus: check for errors of IO routines"
* tag 'net-5.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (48 commits)
net: vxge: fix use-after-free in vxge_device_unregister
net: fec: fix use-after-free in fec_drv_remove
net: pegasus: fix uninit-value in get_interrupt_interval
net: ethernet: ti: am65-cpsw: fix crash in am65_cpsw_port_offload_fwd_mark_update()
bnx2x: fix an error code in bnx2x_nic_load()
net: wwan: iosm: fix recursive lock acquire in unregister
net: wwan: iosm: correct data protocol mask bit
net: wwan: iosm: endianness type correction
net: wwan: iosm: fix lkp buildbot warning
net: usb: lan78xx: don't modify phy_device state concurrently
docs: networking: netdevsim rules
net: usb: pegasus: Remove the changelog and DRIVER_VERSION.
net: usb: pegasus: Check the return value of get_geristers() and friends;
net/prestera: Fix devlink groups leakage in error flow
net: sched: fix lockdep_set_class() typo error for sch->seqlock
net: dsa: qca: ar9331: reorder MDIO write sequence
VSOCK: handle VIRTIO_VSOCK_OP_CREDIT_REQUEST
mptcp: drop unused rcu member in mptcp_pm_addr_entry
net: ipv6: fix returned variable type in ip6_skb_dst_mtu
nfp: update ethtool reporting of pauseframe control
...
@@ -228,6 +228,23 @@ before posting to the mailing list. The patchwork build bot instance
gets overloaded very easily and netdev@vger really doesn't need more
traffic if we can help it.

netdevsim is great, can I extend it for my out-of-tree tests?
-------------------------------------------------------------

No, `netdevsim` is a test vehicle solely for upstream tests.
(Please add your tests under tools/testing/selftests/.)

We also give no guarantees that `netdevsim` won't change in the future
in a way which would break what would normally be considered uAPI.

Is netdevsim considered a "user" of an API?
-------------------------------------------

Linux kernel has a long standing rule that no API should be added unless
it has a real, in-tree user. Mock-ups and tests based on `netdevsim` are
strongly encouraged when adding new APIs, but `netdevsim` in itself
is **not** considered a use case/user.

Any other tips to help ensure my net/net-next patch gets OK'd?
--------------------------------------------------------------
Attention to detail. Re-read your own work as if you were the

@@ -73,7 +73,9 @@ IF_OPER_LOWERLAYERDOWN (3):
state (f.e. VLAN).

IF_OPER_TESTING (4):
Unused in current kernel.
Interface is in testing mode, for example executing driver self-tests
or media (cable) test. It can't be used for normal traffic until tests
complete.

IF_OPER_DORMANT (5):
Interface is L1 up, but waiting for an external event, f.e. for a
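
The IF_OPER_TESTING wording added above refers to drivers flagging the interface as under test. A minimal sketch of how a driver might do that with the in-kernel helpers follows; the driver and self-test function names are hypothetical, only netif_testing_on()/netif_testing_off() and the ethtool hook shape are existing kernel API.

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>

    /* Hypothetical hardware self-test; a real driver would exercise the MAC
     * or run the cable tester here.
     */
    static u64 foo_run_hw_selftest(struct net_device *dev)
    {
            return 0;       /* 0 == pass */
    }

    /* Hypothetical ethtool .self_test hook: mark the interface as "testing"
     * while the test runs, so its operstate reports IF_OPER_TESTING instead
     * of carrying normal traffic, as the documentation above describes.
     */
    static void foo_self_test(struct net_device *dev,
                              struct ethtool_test *etest, u64 *buf)
    {
            netif_testing_on(dev);
            buf[0] = foo_run_hw_selftest(dev);
            if (buf[0])
                    etest->flags |= ETH_TEST_FL_FAILED;
            netif_testing_off(dev);
    }
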
@@ -111,7 +113,7 @@ it as lower layer.

Note that for certain kind of soft-devices, which are not managing any
real hardware, it is possible to set this bit from userspace. One
should use TVL IFLA_CARRIER to do so.
should use TLV IFLA_CARRIER to do so.

netif_carrier_ok() can be used to query that bit.
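
The TVL/TLV fix above concerns the rtnetlink attribute used to force carrier on soft devices. A hedged userspace sketch of setting that attribute follows; the interface name "dummy0" is an assumption for illustration, and error handling is reduced to the minimum.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_link.h>

    /* Send RTM_SETLINK with an IFLA_CARRIER TLV to turn carrier on for a
     * soft device (the device must implement ndo_change_carrier, as the
     * dummy driver does).  Illustrative sketch only.
     */
    int main(void)
    {
            struct {
                    struct nlmsghdr nh;
                    struct ifinfomsg ifi;
                    char attrs[64];
            } req;
            struct rtattr *rta;
            __u8 carrier = 1;
            int fd;

            memset(&req, 0, sizeof(req));
            req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifi));
            req.nh.nlmsg_type = RTM_SETLINK;
            req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
            req.ifi.ifi_family = AF_UNSPEC;
            req.ifi.ifi_index = if_nametoindex("dummy0");   /* assumed test device */
            if (!req.ifi.ifi_index)
                    return 1;

            rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
            rta->rta_type = IFLA_CARRIER;
            rta->rta_len = RTA_LENGTH(sizeof(carrier));
            memcpy(RTA_DATA(rta), &carrier, sizeof(carrier));
            req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

            fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
            if (fd < 0)
                    return 1;
            if (send(fd, &req, req.nh.nlmsg_len, 0) < 0)
                    perror("send");
            close(fd);
            return 0;
    }
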
@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
struct mhi_chan *mhi_chan, unsigned int flags);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,

@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
}

int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
struct device *dev = &mhi_chan->mhi_dev->dev;

@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
if (ret)
goto error_pm_state;

if (mhi_chan->dir == DMA_FROM_DEVICE)
mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);

/* Pre-allocate buffer for xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,

@@ -1610,7 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
}

/* Move channel to start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
int ret, dir;
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;

ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
if (ret)
goto error_open_chan;
}

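The MHI hunks above make inbound buffer pre-allocation opt-in through a flags argument. A hedged sketch of a client that wants the old behaviour back is shown below; the probe function and driver are hypothetical, only the mhi_prepare_for_transfer() call and the MHI_CH_INBOUND_ALLOC_BUFS flag come from the change itself.

    #include <linux/mhi.h>
    #include <linux/mod_devicetable.h>

    /* Hypothetical MHI client probe: ask the MHI core to pre-allocate RX
     * buffers for the inbound channel.  A client that queues its own RX
     * buffers (such as mhi_net later in this diff) passes 0 instead.
     */
    static int foo_mhi_probe(struct mhi_device *mhi_dev,
                             const struct mhi_device_id *id)
    {
            int ret;

            ret = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
            if (ret)
                    return ret;

            /* ... allocate and register the rest of the client device ... */
            return 0;
    }
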
@@ -837,16 +837,24 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
return 0;
}

ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
if (ret < 0)
goto error;

/* In case of this switch we work with 32bit registers on top of 16bit
* bus. Some registers (for example access to forwarding database) have
* trigger bit on the first 16bit half of request, the result and
* configuration of request in the second half.
* To make it work properly, we should do the second part of transfer
* before the first one is done.
*/
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
val >> 16);
if (ret < 0)
goto error;

ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
if (ret < 0)
goto error;

return 0;

error:
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
return ret;

@@ -304,6 +304,15 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
hostcmd = SJA1105_HOSTCMD_INVALIDATE;
}
sja1105_packing(p, &hostcmd, 25, 23, size, op);
}

static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;

sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);

/* Hack - The hardware takes the 'index' field within
* struct sja1105_l2_lookup_entry as the index on which this command

@@ -313,26 +322,18 @@ sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
* such that our API doesn't need to ask for a full-blown entry
* structure when e.g. a delete is requested.
*/
sja1105_packing(buf, &cmd->index, 15, 6,
SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
}

static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
int size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;

return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
}

static void
sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
int size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;

return sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, size);
sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);

sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
}

/* The switch is so retarded that it makes our command/entry abstraction

@@ -1318,10 +1318,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
int sja1105et_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int last_unused = -1;
int start, end, i;
int bin, way, rc;

bin = sja1105et_fdb_hash(priv, addr, vid);

@@ -1333,7 +1334,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
* mask? If yes, we need to do nothing. If not, we need
* to rewrite the entry by adding this port to it.
*/
if (l2_lookup.destports & BIT(port))
if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
return 0;
l2_lookup.destports |= BIT(port);
} else {

@@ -1364,6 +1365,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
index, NULL, false);
}
}
l2_lookup.lockeds = true;
l2_lookup.index = sja1105et_fdb_index(bin, way);

rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,

@@ -1372,6 +1374,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port,
if (rc < 0)
return rc;

/* Invalidate a dynamically learned entry if that exists */
start = sja1105et_fdb_index(bin, 0);
end = sja1105et_fdb_index(bin, way);

for (i = start; i < end; i++) {
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
i, &tmp);
if (rc == -ENOENT)
continue;
if (rc)
return rc;

if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
continue;

rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
i, NULL, false);
if (rc)
return rc;

break;
}

return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

@@ -1413,32 +1438,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port,
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_l2_lookup_entry l2_lookup = {0};
struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
struct sja1105_private *priv = ds->priv;
int rc, i;

/* Search for an existing entry in the FDB table */
l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
l2_lookup.mask_vlanid = 0;
l2_lookup.mask_iotag = 0;
}
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.destports = BIT(port);

tmp = l2_lookup;

rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
SJA1105_SEARCH, &l2_lookup);
if (rc == 0) {
/* Found and this port is already in the entry's
SJA1105_SEARCH, &tmp);
if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
/* Found a static entry and this port is already in the entry's
* port mask => job done
*/
if (l2_lookup.destports & BIT(port))
if ((tmp.destports & BIT(port)) && tmp.lockeds)
return 0;

l2_lookup = tmp;

/* l2_lookup.index is populated by the switch in case it
* found something.
*/

@@ -1460,16 +1483,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
dev_err(ds->dev, "FDB is full, cannot add entry.\n");
return -EINVAL;
}
l2_lookup.lockeds = true;
l2_lookup.index = i;

skip_finding_an_index:
l2_lookup.lockeds = true;

rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup.index, &l2_lookup,
true);
if (rc < 0)
return rc;

/* The switch learns dynamic entries and looks up the FDB left to
* right. It is possible that our addition was concurrent with the
* dynamic learning of the same address, so now that the static entry
* has been installed, we are certain that address learning for this
* particular address has been turned off, so the dynamic entry either
* is in the FDB at an index smaller than the static one, or isn't (it
* can also be at a larger index, but in that case it is inactive
* because the static FDB entry will match first, and the dynamic one
* will eventually age out). Search for a dynamically learned address
* prior to our static one and invalidate it.
*/
tmp = l2_lookup;

rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
SJA1105_SEARCH, &tmp);
if (rc < 0) {
dev_err(ds->dev,
"port %d failed to read back entry for %pM vid %d: %pe\n",
port, addr, vid, ERR_PTR(rc));
return rc;
}

if (tmp.index < l2_lookup.index) {
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
tmp.index, NULL, false);
if (rc < 0)
return rc;
}

return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}

@@ -1483,15 +1536,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,

l2_lookup.macaddr = ether_addr_to_u64(addr);
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
l2_lookup.mask_vlanid = 0;
l2_lookup.mask_iotag = 0;
}
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.destports = BIT(port);

rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,

@@ -2669,7 +2669,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}

/* Allocated memory for FW statistics */
if (bnx2x_alloc_fw_stats_mem(bp))
rc = bnx2x_alloc_fw_stats_mem(bp);
if (rc)
LOAD_ERROR_EXIT(bp, load_error0);

/* request pf to initialize status blocks */

@@ -3843,13 +3843,13 @@ fec_drv_remove(struct platform_device *pdev)
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
free_netdev(ndev);

clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);

free_netdev(ndev);
return 0;
}

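The fec and vxge fixes in this pull share one pattern: free_netdev() releases the net_device together with the private area returned by netdev_priv(), so any dereference of that private data afterwards is a use-after-free. A compressed, hypothetical illustration of the ordering rule the hunk above restores (not the driver code itself):

    #include <linux/netdevice.h>
    #include <linux/clk.h>

    struct foo_priv {                       /* stand-in for the driver private struct */
            struct clk *clk_ahb;
    };

    static void foo_remove_buggy(struct net_device *ndev)
    {
            struct foo_priv *priv = netdev_priv(ndev);  /* points inside ndev's allocation */

            free_netdev(ndev);                          /* frees ndev *and* priv */
            clk_disable_unprepare(priv->clk_ahb);       /* use-after-free: must happen first */
    }
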
@@ -530,6 +530,8 @@ err_trap_register:
prestera_trap = &prestera_trap_items_arr[i];
devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
}
devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr,
groups_count);
err_groups_register:
kfree(trap_data->trap_items_arr);
err_trap_items_alloc:

@@ -13,19 +13,26 @@
*/
#define VSTAX 73

static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
#define ifh_encode_bitfield(ifh, value, pos, _width) \
({ \
u32 width = (_width); \
\
/* Max width is 5 bytes - 40 bits. In worst case this will
* spread over 6 bytes - 48 bits
*/ \
compiletime_assert(width <= 40, \
"Unsupported width, must be <= 40"); \
__ifh_encode_bitfield((ifh), (value), (pos), width); \
})

static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
{
u8 *ifh_hdr = ifh;
/* Calculate the Start IFH byte position of this IFH bit position */
u32 byte = (35 - (pos / 8));
/* Calculate the Start bit position in the Start IFH byte */
u32 bit = (pos % 8);
u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);

/* Max width is 5 bytes - 40 bits. In worst case this will
* spread over 6 bytes - 48 bits
*/
compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
u64 encode = GENMASK_ULL(bit + width - 1, bit) & (value << bit);

/* The b0-b7 goes into the start IFH byte */
if (encode & 0xFF)

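The sparx5 change above does two things: it turns the helper into a macro so compiletime_assert() sees a compile-time-constant width, and it switches GENMASK() to GENMASK_ULL() because the encoded field can spread over up to 48 bits. A small hedged note on the second point, with an illustrative bit range that is not taken from the driver:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* On a 32-bit kernel GENMASK() is built on unsigned long (32 bits), so a
     * mask whose top bit lies above bit 31 cannot be represented and the
     * shift is undefined; GENMASK_ULL() always produces a 64-bit mask.
     */
    static inline u64 demo_wide_mask(void)
    {
            return GENMASK_ULL(39, 8);      /* 0x000000ffffffff00 on every arch */
    }
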
@@ -819,7 +819,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(version);
#endif

i = pci_enable_device(pdev);
i = pcim_enable_device(pdev);
if (i) return i;

/* natsemi has a non-standard PM control register

@@ -852,7 +852,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = ioremap(iostart, iosize);
if (!ioaddr) {
i = -ENOMEM;
goto err_ioremap;
goto err_pci_request_regions;
}

/* Work around the dropped serial bit. */

@@ -974,9 +974,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register_netdev:
iounmap(ioaddr);

err_ioremap:
pci_release_regions(pdev);

err_pci_request_regions:
free_netdev(dev);
return i;

@@ -3241,7 +3238,6 @@ static void natsemi_remove1(struct pci_dev *pdev)

NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
unregister_netdev (dev);
pci_release_regions (pdev);
iounmap(ioaddr);
free_netdev (dev);
}

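The natsemi hunks above move the probe path to the managed ("devres") PCI helpers, which is why the matching cleanup calls can disappear from the error path and from natsemi_remove1(): resources taken through pcim_* are released automatically when the driver unbinds. A generic, hypothetical sketch of that pattern (driver name and BAR choice are assumptions, not from the patch):

    #include <linux/pci.h>
    #include <linux/bits.h>

    /* Hypothetical devres-style probe: no explicit pci_disable_device(),
     * pci_release_regions() or iounmap() is needed on failure or in .remove;
     * the PCI core undoes everything when the device is unbound.
     */
    static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            void __iomem *ioaddr;
            int err;

            err = pcim_enable_device(pdev);                 /* managed pci_enable_device() */
            if (err)
                    return err;

            err = pcim_iomap_regions(pdev, BIT(0), "foo");  /* request + ioremap BAR 0 */
            if (err)
                    return err;

            ioaddr = pcim_iomap_table(pdev)[0];
            /* ... register the netdev using ioaddr ... */
            (void)ioaddr;
            return 0;
    }
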
@@ -3512,13 +3512,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)

kfree(vdev->vpaths);

/* we are safe to free it now */
free_netdev(dev);

vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
buf);
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
__func__, __LINE__);

/* we are safe to free it now */
free_netdev(dev);
}

/*

@@ -286,6 +286,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,

/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;

@@ -501,6 +501,7 @@ struct qede_fastpath {
#define QEDE_SP_HW_ERR 4
#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
#define QEDE_SP_DISABLE 8

#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,

@@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);

/* Disable execution of this deferred work once
* qede removal is in progress, this stop any future
* scheduling of sp_task.
*/
if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
return;

/* The locking scheme depends on the specific flag:
* In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
* ensure that ongoing flows are ended and new ones are not started.

@@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

if (mode != QEDE_REMOVE_RECOVERY) {
set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
unregister_netdev(ndev);

cancel_delayed_work_sync(&edev->sp_task);

@@ -2060,8 +2060,12 @@ static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *comm

for (i = 1; i <= common->port_num; i++) {
struct am65_cpsw_port *port = am65_common_get_port(common, i);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
struct am65_cpsw_ndev_priv *priv;

if (!port->ndev)
continue;

priv = am65_ndev_to_priv(port->ndev);
priv->offload_fwd_mark = set_val;
}
}

@@ -335,7 +335,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
u64_stats_init(&mhi_netdev->stats.tx_syncp);

/* Start MHI channels */
err = mhi_prepare_for_transfer(mhi_dev);
err = mhi_prepare_for_transfer(mhi_dev, 0);
if (err)
goto out_err;

@@ -401,11 +401,11 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
}

static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
const u32 ksz_phy_id)
const bool ksz_8051)
{
int ret;

if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
return 0;

ret = phy_read(phydev, MII_BMSR);

@@ -418,7 +418,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
* the switch does not.
*/
ret &= BMSR_ERCAP;
if (ksz_phy_id == PHY_ID_KSZ8051)
if (ksz_8051)
return ret;
else
return !ret;

@@ -426,7 +426,7 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,

static int ksz8051_match_phy_device(struct phy_device *phydev)
{
return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
return ksz8051_ksz8795_match_phy_device(phydev, true);
}

static int ksz8081_config_init(struct phy_device *phydev)

@@ -535,7 +535,7 @@ static int ksz8061_config_init(struct phy_device *phydev)

static int ksz8795_match_phy_device(struct phy_device *phydev)
{
return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
return ksz8051_ksz8795_match_phy_device(phydev, false);
}

static int ksz9021_load_values_from_of(struct phy_device *phydev,

@@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
int ladv, radv, ret;
int ladv, radv, ret, link;
u32 buf;

/* clear LAN78xx interrupt status */

@@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (unlikely(ret < 0))
return -EIO;

mutex_lock(&phydev->lock);
phy_read_status(phydev);
link = phydev->link;
mutex_unlock(&phydev->lock);

if (!phydev->link && dev->link_on) {
if (!link && dev->link_on) {
dev->link_on = false;

/* reset MAC */

@@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
return -EIO;

del_timer(&dev->stat_monitor);
} else if (phydev->link && !dev->link_on) {
} else if (link && !dev->link_on) {
dev->link_on = true;

phy_ethtool_ksettings_get(phydev, &ecmd);

@@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)

static u32 lan78xx_get_link(struct net_device *net)
{
phy_read_status(net->phydev);
u32 link;

return net->phydev->link;
mutex_lock(&net->phydev->lock);
phy_read_status(net->phydev);
link = net->phydev->link;
mutex_unlock(&net->phydev->lock);

return link;
}

static void lan78xx_get_drvinfo(struct net_device *net,