You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Account for extra headroom in ath9k driver, from Felix Fietkau.
2) Fix OOPS in pppoe driver due to incorrect socket state transition,
from Guillaume Nault.
3) Kill memory leak in amd-xgbe debugfs, from Geliang Tang.
4) Power management fixes for iwlwifi, from Johannes Berg.
5) Fix races in reqsk_queue_unlink(), from Eric Dumazet.
6) Fix dst_entry usage in ARP replies, from Jiri Benc.
7) Cure OOPSes with SO_GET_FILTER, from Daniel Borkmann.
8) Missing allocation failure check in amd-xgbe, from Tom Lendacky.
9) Various resource allocation/freeing cures in DSA, from Neil
Armstrong.
10) A series of bug fixes in the openvswitch conntrack support, from
Joe Stringer.
11) Fix two cases (BPF and act_mirred) where we have to clean the sender
cpu stored in the SKB before transmitting. From WANG Cong and
Alexei Starovoitov.
12) Disable VLAN filtering in promiscuous mode in mlx5 driver, from
Achiad Shochat.
13) Older bnx2x chips cannot do 4-tuple UDP hashing, so prevent this
configuration via ethtool. From Yuval Mintz.
14) Don't call rt6_uncached_list_flush_dev() from rt6_ifdown() when
'dev' is NULL, from Eric Biederman.
15) Prevent stalled link synchronization in tipc, from Jon Paul Maloy.
16) kcalloc() gstrings ethtool buffer before having driver fill it in,
in order to prevent kernel memory leaking. From Joe Perches.
17) Fix missing rt6_info initialization for blackhole routes, from
Martin KaFai Lau.
18) Kill VLAN regression in via-rhine, from Andrej Ota.
19) Missing pfmemalloc check in sk_add_backlog(), from Eric Dumazet.
20) Fix spurious MSG_TRUNC signalling in netlink dumps, from Ronen Arad.
21) Scrub SKBs when pushing them between namespaces in openvswitch,
from Joe Stringer.
22) bcmgenet enables link interrupts too early, fix from Florian
Fainelli.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (92 commits)
net: bcmgenet: Fix early link interrupt enabling
tunnels: Don't require remote endpoint or ID during creation.
openvswitch: Scrub skb between namespaces
xen-netback: correctly check failed allocation
net: asix: add support for the Billionton GUSB2AM-1G-B USB adapter
netlink: Trim skb to alloc size to avoid MSG_TRUNC
net: add pfmemalloc check in sk_add_backlog()
via-rhine: fix VLAN receive handling regression.
ipv6: Initialize rt6_info properly in ip6_blackhole_route()
ipv6: Move common init code for rt6_info to a new function rt6_info_init()
Bluetooth: Fix initializing conn_params in scan phase
Bluetooth: Fix conn_params list update in hci_connect_le_scan_cleanup
Bluetooth: Fix remove_device behavior for explicit connects
Bluetooth: Fix LE reconnection logic
Bluetooth: Fix reference counting for LE-scan based connections
Bluetooth: Fix double scan updates
mlxsw: core: Fix race condition in __mlxsw_emad_transmit
tipc: move fragment importance field to new header position
ethtool: Use kcalloc instead of kmalloc for ethtool_get_strings
tipc: eliminate risk of stalled link synchronization
...
This commit is contained in:
@@ -6793,7 +6793,6 @@ F: drivers/scsi/megaraid/
|
|||||||
|
|
||||||
MELLANOX ETHERNET DRIVER (mlx4_en)
|
MELLANOX ETHERNET DRIVER (mlx4_en)
|
||||||
M: Amir Vadai <amirv@mellanox.com>
|
M: Amir Vadai <amirv@mellanox.com>
|
||||||
M: Ido Shamay <idos@mellanox.com>
|
|
||||||
L: netdev@vger.kernel.org
|
L: netdev@vger.kernel.org
|
||||||
S: Supported
|
S: Supported
|
||||||
W: http://www.mellanox.com
|
W: http://www.mellanox.com
|
||||||
|
|||||||
@@ -614,6 +614,7 @@ load_common:
|
|||||||
case BPF_LD | BPF_B | BPF_IND:
|
case BPF_LD | BPF_B | BPF_IND:
|
||||||
load_order = 0;
|
load_order = 0;
|
||||||
load_ind:
|
load_ind:
|
||||||
|
update_on_xread(ctx);
|
||||||
OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
|
OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
|
||||||
goto load_common;
|
goto load_common;
|
||||||
case BPF_LDX | BPF_IMM:
|
case BPF_LDX | BPF_IMM:
|
||||||
|
|||||||
@@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
|
|||||||
{PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
{PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
||||||
{PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
{PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
||||||
{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
||||||
|
{PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
||||||
#ifdef CONFIG_CAN_PEAK_PCIEC
|
#ifdef CONFIG_CAN_PEAK_PCIEC
|
||||||
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
||||||
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
|
||||||
|
|||||||
@@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
|
|||||||
pdata->debugfs_xpcs_reg = 0;
|
pdata->debugfs_xpcs_reg = 0;
|
||||||
|
|
||||||
buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
|
buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
|
||||||
|
if (!buf)
|
||||||
|
return;
|
||||||
|
|
||||||
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
|
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
|
||||||
if (!pdata->xgbe_debugfs) {
|
if (!pdata->xgbe_debugfs) {
|
||||||
netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
|
netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
|
||||||
|
kfree(buf);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3351,6 +3351,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
|
|||||||
udp_rss_requested = 0;
|
udp_rss_requested = 0;
|
||||||
else
|
else
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (CHIP_IS_E1x(bp) && udp_rss_requested) {
|
||||||
|
DP(BNX2X_MSG_ETHTOOL,
|
||||||
|
"57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
if ((info->flow_type == UDP_V4_FLOW) &&
|
if ((info->flow_type == UDP_V4_FLOW) &&
|
||||||
(bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
|
(bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
|
||||||
bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
|
bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
|
||||||
|
|||||||
@@ -1683,6 +1683,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
|
|||||||
bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
|
bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
|
||||||
|
{
|
||||||
|
u32 int0_enable = 0;
|
||||||
|
|
||||||
|
/* Monitor cable plug/unplugged event for internal PHY, external PHY
|
||||||
|
* and MoCA PHY
|
||||||
|
*/
|
||||||
|
if (priv->internal_phy) {
|
||||||
|
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
||||||
|
} else if (priv->ext_phy) {
|
||||||
|
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
||||||
|
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
|
||||||
|
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
|
||||||
|
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
||||||
|
}
|
||||||
|
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
|
||||||
|
}
|
||||||
|
|
||||||
static int init_umac(struct bcmgenet_priv *priv)
|
static int init_umac(struct bcmgenet_priv *priv)
|
||||||
{
|
{
|
||||||
struct device *kdev = &priv->pdev->dev;
|
struct device *kdev = &priv->pdev->dev;
|
||||||
@@ -1723,15 +1741,8 @@ static int init_umac(struct bcmgenet_priv *priv)
|
|||||||
/* Enable Tx default queue 16 interrupts */
|
/* Enable Tx default queue 16 interrupts */
|
||||||
int0_enable |= UMAC_IRQ_TXDMA_DONE;
|
int0_enable |= UMAC_IRQ_TXDMA_DONE;
|
||||||
|
|
||||||
/* Monitor cable plug/unplugged event for internal PHY */
|
/* Configure backpressure vectors for MoCA */
|
||||||
if (priv->internal_phy) {
|
if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
|
||||||
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
|
||||||
} else if (priv->ext_phy) {
|
|
||||||
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
|
||||||
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
|
|
||||||
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
|
|
||||||
int0_enable |= UMAC_IRQ_LINK_EVENT;
|
|
||||||
|
|
||||||
reg = bcmgenet_bp_mc_get(priv);
|
reg = bcmgenet_bp_mc_get(priv);
|
||||||
reg |= BIT(priv->hw_params->bp_in_en_shift);
|
reg |= BIT(priv->hw_params->bp_in_en_shift);
|
||||||
|
|
||||||
@@ -2645,6 +2656,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
|
|||||||
|
|
||||||
netif_tx_start_all_queues(dev);
|
netif_tx_start_all_queues(dev);
|
||||||
|
|
||||||
|
/* Monitor link interrupts now */
|
||||||
|
bcmgenet_link_intr_enable(priv);
|
||||||
|
|
||||||
phy_start(priv->phydev);
|
phy_start(priv->phydev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -592,6 +592,7 @@ struct be_adapter {
|
|||||||
int be_get_temp_freq;
|
int be_get_temp_freq;
|
||||||
struct be_hwmon hwmon_info;
|
struct be_hwmon hwmon_info;
|
||||||
u8 pf_number;
|
u8 pf_number;
|
||||||
|
u8 pci_func_num;
|
||||||
struct rss_info rss_info;
|
struct rss_info rss_info;
|
||||||
/* Filters for packets that need to be sent to BMC */
|
/* Filters for packets that need to be sent to BMC */
|
||||||
u32 bmc_filt_mask;
|
u32 bmc_filt_mask;
|
||||||
|
|||||||
@@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
|
|||||||
return status;
|
return status;
|
||||||
|
|
||||||
dest_wrb = be_cmd_copy(adapter, wrb);
|
dest_wrb = be_cmd_copy(adapter, wrb);
|
||||||
if (!dest_wrb)
|
if (!dest_wrb) {
|
||||||
return -EBUSY;
|
status = -EBUSY;
|
||||||
|
goto unlock;
|
||||||
|
}
|
||||||
|
|
||||||
if (use_mcc(adapter))
|
if (use_mcc(adapter))
|
||||||
status = be_mcc_notify_wait(adapter);
|
status = be_mcc_notify_wait(adapter);
|
||||||
@@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
|
|||||||
if (!status)
|
if (!status)
|
||||||
memcpy(wrb, dest_wrb, sizeof(*wrb));
|
memcpy(wrb, dest_wrb, sizeof(*wrb));
|
||||||
|
|
||||||
|
unlock:
|
||||||
be_cmd_unlock(adapter);
|
be_cmd_unlock(adapter);
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
|
|||||||
be_if_cap_flags(adapter));
|
be_if_cap_flags(adapter));
|
||||||
}
|
}
|
||||||
flags &= be_if_cap_flags(adapter);
|
flags &= be_if_cap_flags(adapter);
|
||||||
|
if (!flags)
|
||||||
|
return -ENOTSUPP;
|
||||||
|
|
||||||
return __be_cmd_rx_filter(adapter, flags, value);
|
return __be_cmd_rx_filter(adapter, flags, value);
|
||||||
}
|
}
|
||||||
@@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
|
|||||||
if (!status) {
|
if (!status) {
|
||||||
attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
|
attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
|
||||||
adapter->hba_port_num = attribs->hba_attribs.phy_port;
|
adapter->hba_port_num = attribs->hba_attribs.phy_port;
|
||||||
|
adapter->pci_func_num = attribs->pci_func_num;
|
||||||
serial_num = attribs->hba_attribs.controller_serial_number;
|
serial_num = attribs->hba_attribs.controller_serial_number;
|
||||||
for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
|
for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
|
||||||
adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
|
adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
|
||||||
@@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
|
|||||||
status = -EINVAL;
|
status = -EINVAL;
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
adapter->pf_number = desc->pf_num;
|
adapter->pf_number = desc->pf_num;
|
||||||
be_copy_nic_desc(res, desc);
|
be_copy_nic_desc(res, desc);
|
||||||
}
|
}
|
||||||
@@ -3721,7 +3726,10 @@ err:
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Will use MBOX only if MCCQ has not been created */
|
/* Will use MBOX only if MCCQ has not been created
|
||||||
|
* non-zero domain => a PF is querying this on behalf of a VF
|
||||||
|
* zero domain => a PF or a VF is querying this for itself
|
||||||
|
*/
|
||||||
int be_cmd_get_profile_config(struct be_adapter *adapter,
|
int be_cmd_get_profile_config(struct be_adapter *adapter,
|
||||||
struct be_resources *res, u8 query, u8 domain)
|
struct be_resources *res, u8 query, u8 domain)
|
||||||
{
|
{
|
||||||
@@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
|
|||||||
OPCODE_COMMON_GET_PROFILE_CONFIG,
|
OPCODE_COMMON_GET_PROFILE_CONFIG,
|
||||||
cmd.size, &wrb, &cmd);
|
cmd.size, &wrb, &cmd);
|
||||||
|
|
||||||
req->hdr.domain = domain;
|
|
||||||
if (!lancer_chip(adapter))
|
if (!lancer_chip(adapter))
|
||||||
req->hdr.version = 1;
|
req->hdr.version = 1;
|
||||||
req->type = ACTIVE_PROFILE_TYPE;
|
req->type = ACTIVE_PROFILE_TYPE;
|
||||||
|
/* When a function is querying profile information relating to
|
||||||
|
* itself hdr.pf_number must be set to it's pci_func_num + 1
|
||||||
|
*/
|
||||||
|
req->hdr.domain = domain;
|
||||||
|
if (domain == 0)
|
||||||
|
req->hdr.pf_num = adapter->pci_func_num + 1;
|
||||||
|
|
||||||
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
|
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
|
||||||
* descriptors with all bits set to "1" for the fields which can be
|
* descriptors with all bits set to "1" for the fields which can be
|
||||||
@@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter,
|
|||||||
vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
|
vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
|
||||||
BE_IF_FLAGS_DEFQ_RSS);
|
BE_IF_FLAGS_DEFQ_RSS);
|
||||||
}
|
}
|
||||||
|
|
||||||
nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
|
|
||||||
} else {
|
} else {
|
||||||
num_vf_qs = 1;
|
num_vf_qs = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
|
||||||
|
nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
|
||||||
|
vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
|
||||||
|
}
|
||||||
|
|
||||||
|
nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
|
||||||
nic_vft->rq_count = cpu_to_le16(num_vf_qs);
|
nic_vft->rq_count = cpu_to_le16(num_vf_qs);
|
||||||
nic_vft->txq_count = cpu_to_le16(num_vf_qs);
|
nic_vft->txq_count = cpu_to_le16(num_vf_qs);
|
||||||
nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
|
nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
|
||||||
|
|||||||
@@ -289,7 +289,9 @@ struct be_cmd_req_hdr {
|
|||||||
u32 timeout; /* dword 1 */
|
u32 timeout; /* dword 1 */
|
||||||
u32 request_length; /* dword 2 */
|
u32 request_length; /* dword 2 */
|
||||||
u8 version; /* dword 3 */
|
u8 version; /* dword 3 */
|
||||||
u8 rsvd[3]; /* dword 3 */
|
u8 rsvd1; /* dword 3 */
|
||||||
|
u8 pf_num; /* dword 3 */
|
||||||
|
u8 rsvd2; /* dword 3 */
|
||||||
};
|
};
|
||||||
|
|
||||||
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
|
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
|
||||||
@@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs {
|
|||||||
|
|
||||||
struct mgmt_controller_attrib {
|
struct mgmt_controller_attrib {
|
||||||
struct mgmt_hba_attribs hba_attribs;
|
struct mgmt_hba_attribs hba_attribs;
|
||||||
u32 rsvd0[10];
|
u32 rsvd0[2];
|
||||||
|
u16 rsvd1;
|
||||||
|
u8 pci_func_num;
|
||||||
|
u8 rsvd2;
|
||||||
|
u32 rsvd3[7];
|
||||||
} __packed;
|
} __packed;
|
||||||
|
|
||||||
struct be_cmd_req_cntl_attribs {
|
struct be_cmd_req_cntl_attribs {
|
||||||
|
|||||||
@@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
|
|||||||
struct sk_buff *skb,
|
struct sk_buff *skb,
|
||||||
struct be_wrb_params *wrb_params)
|
struct be_wrb_params *wrb_params)
|
||||||
{
|
{
|
||||||
/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
|
/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
|
||||||
* less may cause a transmit stall on that port. So the work-around is
|
* packets that are 32b or less may cause a transmit stall
|
||||||
* to pad short packets (<= 32 bytes) to a 36-byte length.
|
* on that port. The workaround is to pad such packets
|
||||||
|
* (len <= 32 bytes) to a minimum length of 36b.
|
||||||
*/
|
*/
|
||||||
if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
|
if (skb->len <= 32) {
|
||||||
if (skb_put_padto(skb, 36))
|
if (skb_put_padto(skb, 36))
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
@@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter)
|
|||||||
int status, level;
|
int status, level;
|
||||||
u16 profile_id;
|
u16 profile_id;
|
||||||
|
|
||||||
status = be_cmd_get_cntl_attributes(adapter);
|
|
||||||
if (status)
|
|
||||||
return status;
|
|
||||||
|
|
||||||
status = be_cmd_query_fw_cfg(adapter);
|
status = be_cmd_query_fw_cfg(adapter);
|
||||||
if (status)
|
if (status)
|
||||||
return status;
|
return status;
|
||||||
@@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter)
|
|||||||
if (!lancer_chip(adapter))
|
if (!lancer_chip(adapter))
|
||||||
be_cmd_req_native_mode(adapter);
|
be_cmd_req_native_mode(adapter);
|
||||||
|
|
||||||
|
/* Need to invoke this cmd first to get the PCI Function Number */
|
||||||
|
status = be_cmd_get_cntl_attributes(adapter);
|
||||||
|
if (status)
|
||||||
|
return status;
|
||||||
|
|
||||||
if (!BE2_chip(adapter) && be_physfn(adapter))
|
if (!BE2_chip(adapter) && be_physfn(adapter))
|
||||||
be_alloc_sriov_res(adapter);
|
be_alloc_sriov_res(adapter);
|
||||||
|
|
||||||
@@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (fhdr->asic_type_rev >= adapter->asic_rev);
|
/* In BE3 FW images the "asic_type_rev" field doesn't track the
|
||||||
|
* asic_rev of the chips it is compatible with.
|
||||||
|
* When asic_type_rev is 0 the image is compatible only with
|
||||||
|
* pre-BE3-R chips (asic_rev < 0x10)
|
||||||
|
*/
|
||||||
|
if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
|
||||||
|
return adapter->asic_rev < 0x10;
|
||||||
|
else
|
||||||
|
return (fhdr->asic_type_rev >= adapter->asic_rev);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
|
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
|
||||||
|
|||||||
@@ -198,17 +198,28 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
|
|||||||
|
|
||||||
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
|
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
|
||||||
/*
|
/*
|
||||||
|
* Return the TBIPA address, starting from the address
|
||||||
|
* of the mapped GFAR MDIO registers (struct gfar)
|
||||||
* This is mildly evil, but so is our hardware for doing this.
|
* This is mildly evil, but so is our hardware for doing this.
|
||||||
* Also, we have to cast back to struct gfar because of
|
* Also, we have to cast back to struct gfar because of
|
||||||
* definition weirdness done in gianfar.h.
|
* definition weirdness done in gianfar.h.
|
||||||
*/
|
*/
|
||||||
static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
|
static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
|
||||||
{
|
{
|
||||||
struct gfar __iomem *enet_regs = p;
|
struct gfar __iomem *enet_regs = p;
|
||||||
|
|
||||||
return &enet_regs->tbipa;
|
return &enet_regs->tbipa;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Return the TBIPA address, starting from the address
|
||||||
|
* of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
|
||||||
|
*/
|
||||||
|
static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
|
||||||
|
{
|
||||||
|
return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return the TBIPAR address for an eTSEC2 node
|
* Return the TBIPAR address for an eTSEC2 node
|
||||||
*/
|
*/
|
||||||
@@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
|
|||||||
|
|
||||||
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
|
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
|
||||||
/*
|
/*
|
||||||
* Return the TBIPAR address for a QE MDIO node
|
* Return the TBIPAR address for a QE MDIO node, starting from the address
|
||||||
|
* of the mapped MII registers (struct fsl_pq_mii)
|
||||||
*/
|
*/
|
||||||
static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
|
static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
|
||||||
{
|
{
|
||||||
struct fsl_pq_mdio __iomem *mdio = p;
|
struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
|
||||||
|
|
||||||
return &mdio->utbipar;
|
return &mdio->utbipar;
|
||||||
}
|
}
|
||||||
@@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
|
|||||||
.compatible = "fsl,gianfar-tbi",
|
.compatible = "fsl,gianfar-tbi",
|
||||||
.data = &(struct fsl_pq_mdio_data) {
|
.data = &(struct fsl_pq_mdio_data) {
|
||||||
.mii_offset = 0,
|
.mii_offset = 0,
|
||||||
.get_tbipa = get_gfar_tbipa,
|
.get_tbipa = get_gfar_tbipa_from_mii,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
.compatible = "fsl,gianfar-mdio",
|
.compatible = "fsl,gianfar-mdio",
|
||||||
.data = &(struct fsl_pq_mdio_data) {
|
.data = &(struct fsl_pq_mdio_data) {
|
||||||
.mii_offset = 0,
|
.mii_offset = 0,
|
||||||
.get_tbipa = get_gfar_tbipa,
|
.get_tbipa = get_gfar_tbipa_from_mii,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
|
|||||||
.compatible = "gianfar",
|
.compatible = "gianfar",
|
||||||
.data = &(struct fsl_pq_mdio_data) {
|
.data = &(struct fsl_pq_mdio_data) {
|
||||||
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
|
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
|
||||||
.get_tbipa = get_gfar_tbipa,
|
.get_tbipa = get_gfar_tbipa_from_mdio,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
|
|||||||
|
|
||||||
tbipa = data->get_tbipa(priv->map);
|
tbipa = data->get_tbipa(priv->map);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Add consistency check to make sure TBI is contained
|
||||||
|
* within the mapped range (not because we would get a
|
||||||
|
* segfault, rather to catch bugs in computing TBI
|
||||||
|
* address). Print error message but continue anyway.
|
||||||
|
*/
|
||||||
|
if ((void *)tbipa > priv->map + resource_size(&res) - 4)
|
||||||
|
dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
|
||||||
|
((void *)tbipa - priv->map) + 4);
|
||||||
|
|
||||||
iowrite32be(be32_to_cpup(prop), tbipa);
|
iowrite32be(be32_to_cpup(prop), tbipa);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
|
|||||||
|
|
||||||
hw->aq.asq.next_to_use = 0;
|
hw->aq.asq.next_to_use = 0;
|
||||||
hw->aq.asq.next_to_clean = 0;
|
hw->aq.asq.next_to_clean = 0;
|
||||||
hw->aq.asq.count = hw->aq.num_asq_entries;
|
|
||||||
|
|
||||||
/* allocate the ring memory */
|
/* allocate the ring memory */
|
||||||
ret_code = i40e_alloc_adminq_asq_ring(hw);
|
ret_code = i40e_alloc_adminq_asq_ring(hw);
|
||||||
@@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
|
|||||||
goto init_adminq_free_rings;
|
goto init_adminq_free_rings;
|
||||||
|
|
||||||
/* success! */
|
/* success! */
|
||||||
|
hw->aq.asq.count = hw->aq.num_asq_entries;
|
||||||
goto init_adminq_exit;
|
goto init_adminq_exit;
|
||||||
|
|
||||||
init_adminq_free_rings:
|
init_adminq_free_rings:
|
||||||
@@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
|
|||||||
|
|
||||||
hw->aq.arq.next_to_use = 0;
|
hw->aq.arq.next_to_use = 0;
|
||||||
hw->aq.arq.next_to_clean = 0;
|
hw->aq.arq.next_to_clean = 0;
|
||||||
hw->aq.arq.count = hw->aq.num_arq_entries;
|
|
||||||
|
|
||||||
/* allocate the ring memory */
|
/* allocate the ring memory */
|
||||||
ret_code = i40e_alloc_adminq_arq_ring(hw);
|
ret_code = i40e_alloc_adminq_arq_ring(hw);
|
||||||
@@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
|
|||||||
goto init_adminq_free_rings;
|
goto init_adminq_free_rings;
|
||||||
|
|
||||||
/* success! */
|
/* success! */
|
||||||
|
hw->aq.arq.count = hw->aq.num_arq_entries;
|
||||||
goto init_adminq_exit;
|
goto init_adminq_exit;
|
||||||
|
|
||||||
init_adminq_free_rings:
|
init_adminq_free_rings:
|
||||||
|
|||||||
@@ -8389,6 +8389,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
|
|||||||
|
|
||||||
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
|
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
|
||||||
NETIF_F_GSO_UDP_TUNNEL |
|
NETIF_F_GSO_UDP_TUNNEL |
|
||||||
|
NETIF_F_GSO_GRE |
|
||||||
NETIF_F_TSO;
|
NETIF_F_TSO;
|
||||||
|
|
||||||
netdev->features = NETIF_F_SG |
|
netdev->features = NETIF_F_SG |
|
||||||
@@ -8396,6 +8397,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
|
|||||||
NETIF_F_SCTP_CSUM |
|
NETIF_F_SCTP_CSUM |
|
||||||
NETIF_F_HIGHDMA |
|
NETIF_F_HIGHDMA |
|
||||||
NETIF_F_GSO_UDP_TUNNEL |
|
NETIF_F_GSO_UDP_TUNNEL |
|
||||||
|
NETIF_F_GSO_GRE |
|
||||||
NETIF_F_HW_VLAN_CTAG_TX |
|
NETIF_F_HW_VLAN_CTAG_TX |
|
||||||
NETIF_F_HW_VLAN_CTAG_RX |
|
NETIF_F_HW_VLAN_CTAG_RX |
|
||||||
NETIF_F_HW_VLAN_CTAG_FILTER |
|
NETIF_F_HW_VLAN_CTAG_FILTER |
|
||||||
|
|||||||
@@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
|
|||||||
|
|
||||||
hw->aq.asq.next_to_use = 0;
|
hw->aq.asq.next_to_use = 0;
|
||||||
hw->aq.asq.next_to_clean = 0;
|
hw->aq.asq.next_to_clean = 0;
|
||||||
hw->aq.asq.count = hw->aq.num_asq_entries;
|
|
||||||
|
|
||||||
/* allocate the ring memory */
|
/* allocate the ring memory */
|
||||||
ret_code = i40e_alloc_adminq_asq_ring(hw);
|
ret_code = i40e_alloc_adminq_asq_ring(hw);
|
||||||
@@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
|
|||||||
goto init_adminq_free_rings;
|
goto init_adminq_free_rings;
|
||||||
|
|
||||||
/* success! */
|
/* success! */
|
||||||
|
hw->aq.asq.count = hw->aq.num_asq_entries;
|
||||||
goto init_adminq_exit;
|
goto init_adminq_exit;
|
||||||
|
|
||||||
init_adminq_free_rings:
|
init_adminq_free_rings:
|
||||||
@@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
|
|||||||
|
|
||||||
hw->aq.arq.next_to_use = 0;
|
hw->aq.arq.next_to_use = 0;
|
||||||
hw->aq.arq.next_to_clean = 0;
|
hw->aq.arq.next_to_clean = 0;
|
||||||
hw->aq.arq.count = hw->aq.num_arq_entries;
|
|
||||||
|
|
||||||
/* allocate the ring memory */
|
/* allocate the ring memory */
|
||||||
ret_code = i40e_alloc_adminq_arq_ring(hw);
|
ret_code = i40e_alloc_adminq_arq_ring(hw);
|
||||||
@@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
|
|||||||
goto init_adminq_free_rings;
|
goto init_adminq_free_rings;
|
||||||
|
|
||||||
/* success! */
|
/* success! */
|
||||||
|
hw->aq.arq.count = hw->aq.num_arq_entries;
|
||||||
goto init_adminq_exit;
|
goto init_adminq_exit;
|
||||||
|
|
||||||
init_adminq_free_rings:
|
init_adminq_free_rings:
|
||||||
|
|||||||
@@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
|
|||||||
* and performing a NOP command
|
* and performing a NOP command
|
||||||
*/
|
*/
|
||||||
for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
|
for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
|
||||||
|
/* Make sure request_irq was called */
|
||||||
|
if (!priv->eq_table.eq[i].have_irq)
|
||||||
|
continue;
|
||||||
|
|
||||||
/* Temporary use polling for command completions */
|
/* Temporary use polling for command completions */
|
||||||
mlx4_cmd_use_polling(dev);
|
mlx4_cmd_use_polling(dev);
|
||||||
|
|
||||||
|
|||||||
@@ -2669,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
|||||||
|
|
||||||
if (msi_x) {
|
if (msi_x) {
|
||||||
int nreq = dev->caps.num_ports * num_online_cpus() + 1;
|
int nreq = dev->caps.num_ports * num_online_cpus() + 1;
|
||||||
bool shared_ports = false;
|
|
||||||
|
|
||||||
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
|
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
|
||||||
nreq);
|
nreq);
|
||||||
if (nreq > MAX_MSIX) {
|
if (nreq > MAX_MSIX)
|
||||||
nreq = MAX_MSIX;
|
nreq = MAX_MSIX;
|
||||||
shared_ports = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
|
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
|
||||||
if (!entries)
|
if (!entries)
|
||||||
@@ -2699,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
|||||||
bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
|
bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
|
||||||
dev->caps.num_ports);
|
dev->caps.num_ports);
|
||||||
|
|
||||||
if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
|
|
||||||
shared_ports = true;
|
|
||||||
|
|
||||||
for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
|
for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
|
||||||
if (i == MLX4_EQ_ASYNC)
|
if (i == MLX4_EQ_ASYNC)
|
||||||
continue;
|
continue;
|
||||||
@@ -2709,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
|||||||
priv->eq_table.eq[i].irq =
|
priv->eq_table.eq[i].irq =
|
||||||
entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
|
entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
|
||||||
|
|
||||||
if (shared_ports) {
|
if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
|
||||||
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
|
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
|
||||||
dev->caps.num_ports);
|
dev->caps.num_ports);
|
||||||
/* We don't set affinity hint when there
|
/* We don't set affinity hint when there
|
||||||
|
|||||||
@@ -598,6 +598,8 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
priv->vlan.filter_disabled = false;
|
priv->vlan.filter_disabled = false;
|
||||||
|
if (priv->netdev->flags & IFF_PROMISC)
|
||||||
|
return;
|
||||||
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
|
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
priv->vlan.filter_disabled = true;
|
priv->vlan.filter_disabled = true;
|
||||||
|
if (priv->netdev->flags & IFF_PROMISC)
|
||||||
|
return;
|
||||||
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
|
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
|
|||||||
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
|
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
|
||||||
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
|
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
|
||||||
|
|
||||||
if (enable_promisc)
|
if (enable_promisc) {
|
||||||
mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
|
mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
|
||||||
|
if (!priv->vlan.filter_disabled)
|
||||||
|
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
|
||||||
|
0);
|
||||||
|
}
|
||||||
if (enable_allmulti)
|
if (enable_allmulti)
|
||||||
mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
|
mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
|
||||||
if (enable_broadcast)
|
if (enable_broadcast)
|
||||||
@@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
|
|||||||
mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
|
mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
|
||||||
if (disable_allmulti)
|
if (disable_allmulti)
|
||||||
mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
|
mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
|
||||||
if (disable_promisc)
|
if (disable_promisc) {
|
||||||
|
if (!priv->vlan.filter_disabled)
|
||||||
|
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
|
||||||
|
0);
|
||||||
mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
|
mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
|
||||||
|
}
|
||||||
|
|
||||||
ea->promisc_enabled = promisc_enabled;
|
ea->promisc_enabled = promisc_enabled;
|
||||||
ea->allmulti_enabled = allmulti_enabled;
|
ea->allmulti_enabled = allmulti_enabled;
|
||||||
|
|||||||
@@ -311,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
|
|||||||
int err;
|
int err;
|
||||||
|
|
||||||
memset(in, 0, sizeof(in));
|
memset(in, 0, sizeof(in));
|
||||||
MLX5_SET(ptys_reg, in, local_port, local_port);
|
MLX5_SET(pvlc_reg, in, local_port, local_port);
|
||||||
|
|
||||||
err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
|
err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
|
||||||
pvlc_size, MLX5_REG_PVLC, 0, 0);
|
pvlc_size, MLX5_REG_PVLC, 0, 0);
|
||||||
|
|||||||
@@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
|
|||||||
int err;
|
int err;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
mlxsw_core->emad.trans_active = true;
|
||||||
|
|
||||||
err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
|
err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
|
||||||
if (err) {
|
if (err) {
|
||||||
dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
|
dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
|
||||||
mlxsw_core->emad.tid);
|
mlxsw_core->emad.tid);
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
return err;
|
goto trans_inactive_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
mlxsw_core->emad.trans_active = true;
|
|
||||||
ret = wait_event_timeout(mlxsw_core->emad.wait,
|
ret = wait_event_timeout(mlxsw_core->emad.wait,
|
||||||
!(mlxsw_core->emad.trans_active),
|
!(mlxsw_core->emad.trans_active),
|
||||||
msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
|
msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
|
dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
|
||||||
mlxsw_core->emad.tid);
|
mlxsw_core->emad.tid);
|
||||||
mlxsw_core->emad.trans_active = false;
|
err = -EIO;
|
||||||
return -EIO;
|
goto trans_inactive_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
trans_inactive_out:
|
||||||
|
mlxsw_core->emad.trans_active = false;
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
|
static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
|
||||||
|
|||||||
@@ -187,6 +187,7 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
|
|||||||
{
|
{
|
||||||
u16 max_index, be_index;
|
u16 max_index, be_index;
|
||||||
u16 offset; /* byte offset inside the array */
|
u16 offset; /* byte offset inside the array */
|
||||||
|
u8 in_byte_index;
|
||||||
|
|
||||||
BUG_ON(index && !item->element_size);
|
BUG_ON(index && !item->element_size);
|
||||||
if (item->offset % sizeof(u32) != 0 ||
|
if (item->offset % sizeof(u32) != 0 ||
|
||||||
@@ -199,7 +200,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
|
|||||||
max_index = (item->size.bytes << 3) / item->element_size - 1;
|
max_index = (item->size.bytes << 3) / item->element_size - 1;
|
||||||
be_index = max_index - index;
|
be_index = max_index - index;
|
||||||
offset = be_index * item->element_size >> 3;
|
offset = be_index * item->element_size >> 3;
|
||||||
*shift = index % (BITS_PER_BYTE / item->element_size) << 1;
|
in_byte_index = index % (BITS_PER_BYTE / item->element_size);
|
||||||
|
*shift = in_byte_index * item->element_size;
|
||||||
|
|
||||||
return item->offset + offset;
|
return item->offset + offset;
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user