You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Ensure that mtu is at least IPV6_MIN_MTU in ipv6 VTI tunnel driver,
from Steffen Klassert.
2) Fix crashes when user tries to get_next_key on an LPM bpf map, from
Alexei Starovoitov.
3) Fix detection of VLAN filtering feature for bnx2x VF devices, from
Michal Schmidt.
4) We can get a divide by zero when TCP sockets are morphed into
listening state, fix from Eric Dumazet.
5) Fix socket refcounting bugs in skb_complete_wifi_ack() and
skb_complete_tx_timestamp(). From Eric Dumazet.
6) Use after free in dccp_feat_activate_values(), also from Eric
Dumazet.
7) Like bonding, team needs to use ETH_MAX_MTU as netdev->max_mtu, from
Jarod Wilson.
8) Fix use after free in vrf_xmit(), from David Ahern.
9) Don't do UDP Fragmentation Offload on IPComp ipsec packets, from
Alexey Kodanev.
10) Properly check napi_complete_done() return value in order to decide
whether to re-enable IRQs or not in amd-xgbe driver, from Thomas
Lendacky.
11) Fix double free of hwmon device in marvell phy driver, from Andrew
Lunn.
12) Don't crash on malformed netlink attributes in act_connmark, from
Etienne Noss.
13) Don't remove routes with a higher metric in ipv6 ECMP route replace,
from Sabrina Dubroca.
14) Don't write into a cloned SKB in ipv6 fragmentation handling, from
Florian Westphal.
15) Fix routing redirect races in dccp and tcp, basically the ICMP
handler can't modify the socket's cached route if it's locked by the
user at this moment. From Jon Maxwell.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (108 commits)
qed: Enable iSCSI Out-of-Order
qed: Correct out-of-bound access in OOO history
qed: Fix interrupt flags on Rx LL2
qed: Free previous connections when releasing iSCSI
qed: Fix mapping leak on LL2 rx flow
qed: Prevent creation of too-big u32-chains
qed: Align CIDs according to DORQ requirement
mlxsw: reg: Fix SPVMLR max record count
mlxsw: reg: Fix SPVM max record count
net: Resend IGMP memberships upon peer notification.
dccp: fix memory leak during tear-down of unsuccessful connection request
tun: fix premature POLLOUT notification on tun devices
dccp/tcp: fix routing redirect race
ucc/hdlc: fix two little issue
vxlan: fix ovs support
net: use net->count to check whether a netns is alive or not
bridge: drop netfilter fake rtable unconditionally
ipv6: avoid write to a possibly cloned skb
net: wimax/i2400m: fix NULL-deref at probe
isdn/gigaset: fix NULL-deref at probe
...
This commit is contained in:
@@ -71,6 +71,9 @@
|
||||
For Axon it can be absent, though my current driver
|
||||
doesn't handle phy-address yet so for now, keep
|
||||
0x00ffffff in it.
|
||||
- phy-handle : Used to describe configurations where an external PHY
|
||||
is used. Please refer to:
|
||||
Documentation/devicetree/bindings/net/ethernet.txt
|
||||
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
|
||||
operations (if absent the value is the same as
|
||||
rx-fifo-size). For Axon, either absent or 2048.
|
||||
@@ -81,8 +84,22 @@
|
||||
offload, phandle of the TAH device node.
|
||||
- tah-channel : 1 cell, optional. If appropriate, channel used on the
|
||||
TAH engine.
|
||||
- fixed-link : Fixed-link subnode describing a link to a non-MDIO
|
||||
managed entity. See
|
||||
Documentation/devicetree/bindings/net/fixed-link.txt
|
||||
for details.
|
||||
- mdio subnode : When the EMAC has a phy connected to its local
|
||||
mdio, which is supported by the kernel's network
|
||||
PHY library in drivers/net/phy, there must be device
|
||||
tree subnode with the following required properties:
|
||||
- #address-cells: Must be <1>.
|
||||
- #size-cells: Must be <0>.
|
||||
|
||||
Example:
|
||||
For PHY definitions: Please refer to
|
||||
Documentation/devicetree/bindings/net/phy.txt and
|
||||
Documentation/devicetree/bindings/net/ethernet.txt
|
||||
|
||||
Examples:
|
||||
|
||||
EMAC0: ethernet@40000800 {
|
||||
device_type = "network";
|
||||
@@ -104,6 +121,48 @@
|
||||
zmii-channel = <0>;
|
||||
};
|
||||
|
||||
EMAC1: ethernet@ef600c00 {
|
||||
device_type = "network";
|
||||
compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
|
||||
interrupt-parent = <&EMAC1>;
|
||||
interrupts = <0 1>;
|
||||
#interrupt-cells = <1>;
|
||||
#address-cells = <0>;
|
||||
#size-cells = <0>;
|
||||
interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
|
||||
1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
|
||||
reg = <0xef600c00 0x000000c4>;
|
||||
local-mac-address = [000000000000]; /* Filled in by U-Boot */
|
||||
mal-device = <&MAL0>;
|
||||
mal-tx-channel = <0>;
|
||||
mal-rx-channel = <0>;
|
||||
cell-index = <0>;
|
||||
max-frame-size = <9000>;
|
||||
rx-fifo-size = <16384>;
|
||||
tx-fifo-size = <2048>;
|
||||
fifo-entry-size = <10>;
|
||||
phy-mode = "rgmii";
|
||||
phy-handle = <&phy0>;
|
||||
phy-map = <0x00000000>;
|
||||
rgmii-device = <&RGMII0>;
|
||||
rgmii-channel = <0>;
|
||||
tah-device = <&TAH0>;
|
||||
tah-channel = <0>;
|
||||
has-inverted-stacr-oc;
|
||||
has-new-stacr-staopc;
|
||||
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
phy0: ethernet-phy@0 {
|
||||
compatible = "ethernet-phy-ieee802.3-c22";
|
||||
reg = <0>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
ii) McMAL node
|
||||
|
||||
Required properties:
|
||||
@@ -145,4 +204,3 @@
|
||||
- revision : as provided by the RGMII new version register if
|
||||
available.
|
||||
For Axon: 0x0000012a
|
||||
|
||||
|
||||
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
|
||||
FALSE (router)
|
||||
|
||||
forwarding - BOOLEAN
|
||||
Enable IP forwarding on this interface.
|
||||
Enable IP forwarding on this interface. This controls whether packets
|
||||
received _on_ this interface can be forwarded.
|
||||
|
||||
mc_forwarding - BOOLEAN
|
||||
Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
|
||||
|
||||
+5
-4
@@ -266,7 +266,7 @@ unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
int af_alg_accept(struct sock *sk, struct socket *newsock)
|
||||
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
|
||||
{
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
const struct af_alg_type *type;
|
||||
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
|
||||
if (!type)
|
||||
goto unlock;
|
||||
|
||||
sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
|
||||
sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
|
||||
err = -ENOMEM;
|
||||
if (!sk2)
|
||||
goto unlock;
|
||||
@@ -323,9 +323,10 @@ unlock:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(af_alg_accept);
|
||||
|
||||
static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
|
||||
static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
bool kern)
|
||||
{
|
||||
return af_alg_accept(sock->sk, newsock);
|
||||
return af_alg_accept(sock->sk, newsock, kern);
|
||||
}
|
||||
|
||||
static const struct proto_ops alg_proto_ops = {
|
||||
|
||||
+5
-4
@@ -239,7 +239,8 @@ unlock:
|
||||
return err ?: len;
|
||||
}
|
||||
|
||||
static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
|
||||
static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
bool kern)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = af_alg_accept(ask->parent, newsock);
|
||||
err = af_alg_accept(ask->parent, newsock, kern);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
|
||||
}
|
||||
|
||||
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
|
||||
int flags)
|
||||
int flags, bool kern)
|
||||
{
|
||||
int err;
|
||||
|
||||
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return hash_accept(sock, newsock, flags);
|
||||
return hash_accept(sock, newsock, flags, kern);
|
||||
}
|
||||
|
||||
static struct proto_ops algif_hash_ops_nokey = {
|
||||
|
||||
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (hostif->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
dev_info(&udev->dev,
|
||||
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
|
||||
__func__, le16_to_cpu(udev->descriptor.idVendor),
|
||||
|
||||
@@ -2272,10 +2272,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
|
||||
processed = xgbe_rx_poll(channel, budget);
|
||||
|
||||
/* If we processed everything, we are done */
|
||||
if (processed < budget) {
|
||||
/* Turn off polling */
|
||||
napi_complete_done(napi, processed);
|
||||
|
||||
if ((processed < budget) && napi_complete_done(napi, processed)) {
|
||||
/* Enable Tx and Rx interrupts */
|
||||
if (pdata->channel_irq_mode)
|
||||
xgbe_enable_rx_tx_int(pdata, channel);
|
||||
@@ -2317,10 +2314,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
|
||||
} while ((processed < budget) && (processed != last_processed));
|
||||
|
||||
/* If we processed everything, we are done */
|
||||
if (processed < budget) {
|
||||
/* Turn off polling */
|
||||
napi_complete_done(napi, processed);
|
||||
|
||||
if ((processed < budget) && napi_complete_done(napi, processed)) {
|
||||
/* Enable Tx and Rx interrupts */
|
||||
xgbe_enable_rx_tx_ints(pdata);
|
||||
}
|
||||
|
||||
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
|
||||
if (!((1U << i) & self->msix_entry_mask))
|
||||
continue;
|
||||
|
||||
free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
|
||||
if (pdev->msix_enabled)
|
||||
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
|
||||
free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
|
||||
self->msix_entry_mask &= ~(1U << i);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
|
||||
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
|
||||
|
||||
/* VF with OLD Hypervisor or old PF do not support filtering */
|
||||
if (IS_PF(bp)) {
|
||||
if (chip_is_e1x)
|
||||
bp->accept_any_vlan = true;
|
||||
else
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
#ifdef CONFIG_BNX2X_SRIOV
|
||||
} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
#endif
|
||||
}
|
||||
/* For VF we'll know whether to enable VLAN filtering after
|
||||
* getting a response to CHANNEL_TLV_ACQUIRE from PF.
|
||||
*/
|
||||
|
||||
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
|
||||
dev->features |= NETIF_F_HIGHDMA;
|
||||
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
|
||||
if (!netif_running(bp->dev)) {
|
||||
DP(BNX2X_MSG_PTP,
|
||||
"PTP adjfreq called while the interface is down\n");
|
||||
return -EFAULT;
|
||||
return -ENETDOWN;
|
||||
}
|
||||
|
||||
if (ppb < 0) {
|
||||
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
|
||||
{
|
||||
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
|
||||
|
||||
if (!netif_running(bp->dev)) {
|
||||
DP(BNX2X_MSG_PTP,
|
||||
"PTP adjtime called while the interface is down\n");
|
||||
return -ENETDOWN;
|
||||
}
|
||||
|
||||
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
|
||||
|
||||
timecounter_adjtime(&bp->timecounter, delta);
|
||||
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
|
||||
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
|
||||
u64 ns;
|
||||
|
||||
if (!netif_running(bp->dev)) {
|
||||
DP(BNX2X_MSG_PTP,
|
||||
"PTP gettime called while the interface is down\n");
|
||||
return -ENETDOWN;
|
||||
}
|
||||
|
||||
ns = timecounter_read(&bp->timecounter);
|
||||
|
||||
DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
|
||||
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
|
||||
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
|
||||
u64 ns;
|
||||
|
||||
if (!netif_running(bp->dev)) {
|
||||
DP(BNX2X_MSG_PTP,
|
||||
"PTP settime called while the interface is down\n");
|
||||
return -ENETDOWN;
|
||||
}
|
||||
|
||||
ns = timespec64_to_ns(ts);
|
||||
|
||||
DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
|
||||
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
|
||||
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
|
||||
if (rc)
|
||||
goto init_one_freemem;
|
||||
|
||||
#ifdef CONFIG_BNX2X_SRIOV
|
||||
/* VF with OLD Hypervisor or old PF do not support filtering */
|
||||
if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
|
||||
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Enable SRIOV if capability found in configuration space */
|
||||
|
||||
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
|
||||
|
||||
/* Add/Remove the filter */
|
||||
rc = bnx2x_config_vlan_mac(bp, &ramrod);
|
||||
if (rc && rc != -EEXIST) {
|
||||
if (rc == -EEXIST)
|
||||
return 0;
|
||||
if (rc) {
|
||||
BNX2X_ERR("Failed to %s %s\n",
|
||||
filter->add ? "add" : "delete",
|
||||
(filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
|
||||
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
|
||||
return rc;
|
||||
}
|
||||
|
||||
filter->applied = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
|
||||
/* Rollback if needed */
|
||||
if (i != filters->count) {
|
||||
BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
|
||||
i, filters->count + 1);
|
||||
i, filters->count);
|
||||
while (--i >= 0) {
|
||||
if (!filters->filters[i].applied)
|
||||
continue;
|
||||
filters->filters[i].add = !filters->filters[i].add;
|
||||
bnx2x_vf_mac_vlan_config(bp, vf, qid,
|
||||
&filters->filters[i],
|
||||
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
|
||||
continue;
|
||||
}
|
||||
|
||||
DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
|
||||
DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
|
||||
"add addresses for vf %d\n", vf->abs_vfid);
|
||||
for_each_vfq(vf, j) {
|
||||
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
|
||||
|
||||
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
|
||||
cpu_to_le32(U64_HI(q_stats_addr));
|
||||
cur_query_entry->address.lo =
|
||||
cpu_to_le32(U64_LO(q_stats_addr));
|
||||
DP(BNX2X_MSG_IOV,
|
||||
"added address %x %x for vf %d queue %d client %d\n",
|
||||
cur_query_entry->address.hi,
|
||||
cur_query_entry->address.lo, cur_query_entry->funcID,
|
||||
j, cur_query_entry->index);
|
||||
DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
|
||||
"added address %x %x for vf %d queue %d client %d\n",
|
||||
cur_query_entry->address.hi,
|
||||
cur_query_entry->address.lo,
|
||||
cur_query_entry->funcID,
|
||||
j, cur_query_entry->index);
|
||||
cur_query_entry++;
|
||||
cur_data_offset += sizeof(struct per_queue_stats);
|
||||
stats_count++;
|
||||
|
||||
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
|
||||
(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
|
||||
|
||||
bool add;
|
||||
bool applied;
|
||||
u8 *mac;
|
||||
u16 vid;
|
||||
};
|
||||
|
||||
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
||||
struct bnx2x *bp = netdev_priv(dev);
|
||||
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
|
||||
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
|
||||
int rc, i = 0;
|
||||
int rc = 0, i = 0;
|
||||
struct netdev_hw_addr *ha;
|
||||
|
||||
if (bp->state != BNX2X_STATE_OPEN) {
|
||||
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
||||
/* Get Rx mode requested */
|
||||
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
|
||||
|
||||
/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
|
||||
if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
|
||||
DP(NETIF_MSG_IFUP,
|
||||
"VF supports not more than %d multicast MAC addresses\n",
|
||||
PFVF_MAX_MULTICAST_PER_VF);
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
netdev_for_each_mc_addr(ha, dev) {
|
||||
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
|
||||
bnx2x_mc_addr(ha));
|
||||
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
||||
i++;
|
||||
}
|
||||
|
||||
/* We support four PFVF_MAX_MULTICAST_PER_VF mcast
|
||||
* addresses tops
|
||||
*/
|
||||
if (i >= PFVF_MAX_MULTICAST_PER_VF) {
|
||||
DP(NETIF_MSG_IFUP,
|
||||
"VF supports not more than %d multicast MAC addresses\n",
|
||||
PFVF_MAX_MULTICAST_PER_VF);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
req->n_multicast = i;
|
||||
req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
|
||||
req->vf_qid = 0;
|
||||
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
|
||||
out:
|
||||
bnx2x_vfpf_finalize(bp, &req->first_tlv);
|
||||
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* request pf to add a vlan for the vf */
|
||||
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
|
||||
goto op_err;
|
||||
}
|
||||
|
||||
/* build vlan list */
|
||||
fl = NULL;
|
||||
|
||||
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
|
||||
VFPF_VLAN_FILTER);
|
||||
if (rc)
|
||||
goto op_err;
|
||||
|
||||
if (fl) {
|
||||
/* set vlan list */
|
||||
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
|
||||
msg->vf_qid,
|
||||
false);
|
||||
if (rc)
|
||||
goto op_err;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
|
||||
|
||||
@@ -4465,6 +4465,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
|
||||
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
|
||||
}
|
||||
#endif
|
||||
if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
|
||||
FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
|
||||
bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
|
||||
|
||||
switch (resp->port_partition_type) {
|
||||
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
|
||||
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
|
||||
@@ -5507,8 +5511,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
|
||||
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
|
||||
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
|
||||
}
|
||||
link_info->support_auto_speeds =
|
||||
le16_to_cpu(resp->supported_speeds_auto_mode);
|
||||
if (resp->supported_speeds_auto_mode)
|
||||
link_info->support_auto_speeds =
|
||||
le16_to_cpu(resp->supported_speeds_auto_mode);
|
||||
|
||||
hwrm_phy_qcaps_exit:
|
||||
mutex_unlock(&bp->hwrm_cmd_lock);
|
||||
@@ -6495,8 +6500,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
|
||||
if (!silent)
|
||||
bnxt_dbg_dump_states(bp);
|
||||
if (netif_running(bp->dev)) {
|
||||
int rc;
|
||||
|
||||
if (!silent)
|
||||
bnxt_ulp_stop(bp);
|
||||
bnxt_close_nic(bp, false, false);
|
||||
bnxt_open_nic(bp, false, false);
|
||||
rc = bnxt_open_nic(bp, false, false);
|
||||
if (!silent && !rc)
|
||||
bnxt_ulp_start(bp);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7444,6 +7455,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
if (rc)
|
||||
goto init_err_pci_clean;
|
||||
|
||||
rc = bnxt_hwrm_func_reset(bp);
|
||||
if (rc)
|
||||
goto init_err_pci_clean;
|
||||
|
||||
bnxt_hwrm_fw_set_time(bp);
|
||||
|
||||
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
|
||||
@@ -7554,10 +7569,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
if (rc)
|
||||
goto init_err_pci_clean;
|
||||
|
||||
rc = bnxt_hwrm_func_reset(bp);
|
||||
if (rc)
|
||||
goto init_err_pci_clean;
|
||||
|
||||
rc = bnxt_init_int_mode(bp);
|
||||
if (rc)
|
||||
goto init_err_pci_clean;
|
||||
|
||||
@@ -993,6 +993,7 @@ struct bnxt {
|
||||
BNXT_FLAG_ROCEV2_CAP)
|
||||
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
|
||||
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
|
||||
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
|
||||
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
|
||||
|
||||
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
|
||||
|
||||
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
|
||||
return;
|
||||
|
||||
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
|
||||
if (BNXT_PF(bp))
|
||||
if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
|
||||
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
|
||||
else
|
||||
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Broadcom GENET (Gigabit Ethernet) controller driver
|
||||
*
|
||||
* Copyright (c) 2014 Broadcom Corporation
|
||||
* Copyright (c) 2014-2017 Broadcom
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
|
||||
genet_dma_ring_regs[r]);
|
||||
}
|
||||
|
||||
static int bcmgenet_begin(struct net_device *dev)
|
||||
{
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
|
||||
/* Turn on the clock */
|
||||
return clk_prepare_enable(priv->clk);
|
||||
}
|
||||
|
||||
static void bcmgenet_complete(struct net_device *dev)
|
||||
{
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
|
||||
/* Turn off the clock */
|
||||
clk_disable_unprepare(priv->clk);
|
||||
}
|
||||
|
||||
static int bcmgenet_get_link_ksettings(struct net_device *dev,
|
||||
struct ethtool_link_ksettings *cmd)
|
||||
{
|
||||
@@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
|
||||
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
|
||||
/* Misc UniMAC counters */
|
||||
STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
|
||||
UMAC_RBUF_OVFL_CNT),
|
||||
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
|
||||
UMAC_RBUF_OVFL_CNT_V1),
|
||||
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
|
||||
UMAC_RBUF_ERR_CNT_V1),
|
||||
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
|
||||
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
|
||||
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
|
||||
@@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
|
||||
}
|
||||
}
|
||||
|
||||
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
|
||||
{
|
||||
u16 new_offset;
|
||||
u32 val;
|
||||
|
||||
switch (offset) {
|
||||
case UMAC_RBUF_OVFL_CNT_V1:
|
||||
if (GENET_IS_V2(priv))
|
||||
new_offset = RBUF_OVFL_CNT_V2;
|
||||
else
|
||||
new_offset = RBUF_OVFL_CNT_V3PLUS;
|
||||
|
||||
val = bcmgenet_rbuf_readl(priv, new_offset);
|
||||
/* clear if overflowed */
|
||||
if (val == ~0)
|
||||
bcmgenet_rbuf_writel(priv, 0, new_offset);
|
||||
break;
|
||||
case UMAC_RBUF_ERR_CNT_V1:
|
||||
if (GENET_IS_V2(priv))
|
||||
new_offset = RBUF_ERR_CNT_V2;
|
||||
else
|
||||
new_offset = RBUF_ERR_CNT_V3PLUS;
|
||||
|
||||
val = bcmgenet_rbuf_readl(priv, new_offset);
|
||||
/* clear if overflowed */
|
||||
if (val == ~0)
|
||||
bcmgenet_rbuf_writel(priv, 0, new_offset);
|
||||
break;
|
||||
default:
|
||||
val = bcmgenet_umac_readl(priv, offset);
|
||||
/* clear if overflowed */
|
||||
if (val == ~0)
|
||||
bcmgenet_umac_writel(priv, 0, offset);
|
||||
break;
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
|
||||
{
|
||||
int i, j = 0;
|
||||
@@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
|
||||
case BCMGENET_STAT_NETDEV:
|
||||
case BCMGENET_STAT_SOFT:
|
||||
continue;
|
||||
case BCMGENET_STAT_MIB_RX:
|
||||
case BCMGENET_STAT_MIB_TX:
|
||||
case BCMGENET_STAT_RUNT:
|
||||
if (s->type != BCMGENET_STAT_MIB_RX)
|
||||
offset = BCMGENET_STAT_OFFSET;
|
||||
offset += BCMGENET_STAT_OFFSET;
|
||||
/* fall through */
|
||||
case BCMGENET_STAT_MIB_TX:
|
||||
offset += BCMGENET_STAT_OFFSET;
|
||||
/* fall through */
|
||||
case BCMGENET_STAT_MIB_RX:
|
||||
val = bcmgenet_umac_readl(priv,
|
||||
UMAC_MIB_START + j + offset);
|
||||
offset = 0; /* Reset Offset */
|
||||
break;
|
||||
case BCMGENET_STAT_MISC:
|
||||
val = bcmgenet_umac_readl(priv, s->reg_offset);
|
||||
/* clear if overflowed */
|
||||
if (val == ~0)
|
||||
bcmgenet_umac_writel(priv, 0, s->reg_offset);
|
||||
if (GENET_IS_V1(priv)) {
|
||||
val = bcmgenet_umac_readl(priv, s->reg_offset);
|
||||
/* clear if overflowed */
|
||||
if (val == ~0)
|
||||
bcmgenet_umac_writel(priv, 0,
|
||||
s->reg_offset);
|
||||
} else {
|
||||
val = bcmgenet_update_stat_misc(priv,
|
||||
s->reg_offset);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
|
||||
|
||||
/* standard ethtool support functions. */
|
||||
static const struct ethtool_ops bcmgenet_ethtool_ops = {
|
||||
.begin = bcmgenet_begin,
|
||||
.complete = bcmgenet_complete,
|
||||
.get_strings = bcmgenet_get_strings,
|
||||
.get_sset_count = bcmgenet_get_sset_count,
|
||||
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
|
||||
@@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
struct device *kdev = &priv->pdev->dev;
|
||||
struct enet_cb *tx_cb_ptr;
|
||||
struct netdev_queue *txq;
|
||||
unsigned int pkts_compl = 0;
|
||||
unsigned int bytes_compl = 0;
|
||||
unsigned int c_index;
|
||||
@@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
|
||||
dev->stats.tx_packets += pkts_compl;
|
||||
dev->stats.tx_bytes += bytes_compl;
|
||||
|
||||
txq = netdev_get_tx_queue(dev, ring->queue);
|
||||
netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
|
||||
|
||||
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
|
||||
if (netif_tx_queue_stopped(txq))
|
||||
netif_tx_wake_queue(txq);
|
||||
}
|
||||
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
|
||||
pkts_compl, bytes_compl);
|
||||
|
||||
return pkts_compl;
|
||||
}
|
||||
@@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
|
||||
struct bcmgenet_tx_ring *ring =
|
||||
container_of(napi, struct bcmgenet_tx_ring, napi);
|
||||
unsigned int work_done = 0;
|
||||
struct netdev_queue *txq;
|
||||
unsigned long flags;
|
||||
|
||||
work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
|
||||
spin_lock_irqsave(&ring->lock, flags);
|
||||
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
|
||||
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
|
||||
txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
|
||||
netif_tx_wake_queue(txq);
|
||||
}
|
||||
spin_unlock_irqrestore(&ring->lock, flags);
|
||||
|
||||
if (work_done == 0) {
|
||||
napi_complete(napi);
|
||||
@@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
|
||||
/* Interrupt bottom half */
|
||||
static void bcmgenet_irq_task(struct work_struct *work)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int status;
|
||||
struct bcmgenet_priv *priv = container_of(
|
||||
work, struct bcmgenet_priv, bcmgenet_irq_work);
|
||||
|
||||
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
|
||||
|
||||
if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
|
||||
priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
status = priv->irq0_stat;
|
||||
priv->irq0_stat = 0;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
if (status & UMAC_IRQ_MPD_R) {
|
||||
netif_dbg(priv, wol, priv->dev,
|
||||
"magic packet detected, waking up\n");
|
||||
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
|
||||
}
|
||||
|
||||
/* Link UP/DOWN event */
|
||||
if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
|
||||
if (status & UMAC_IRQ_LINK_EVENT)
|
||||
phy_mac_interrupt(priv->phydev,
|
||||
!!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
|
||||
priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
|
||||
}
|
||||
!!(status & UMAC_IRQ_LINK_UP));
|
||||
}
|
||||
|
||||
/* bcmgenet_isr1: handle Rx and Tx priority queues */
|
||||
@@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
|
||||
struct bcmgenet_priv *priv = dev_id;
|
||||
struct bcmgenet_rx_ring *rx_ring;
|
||||
struct bcmgenet_tx_ring *tx_ring;
|
||||
unsigned int index;
|
||||
unsigned int index, status;
|
||||
|
||||
/* Save irq status for bottom-half processing. */
|
||||
priv->irq1_stat =
|
||||
bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
|
||||
/* Read irq status */
|
||||
status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
|
||||
~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
|
||||
|
||||
/* clear interrupts */
|
||||
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
|
||||
bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
|
||||
|
||||
netif_dbg(priv, intr, priv->dev,
|
||||
"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
|
||||
"%s: IRQ=0x%x\n", __func__, status);
|
||||
|
||||
/* Check Rx priority queue interrupts */
|
||||
for (index = 0; index < priv->hw_params->rx_queues; index++) {
|
||||
if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
|
||||
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
|
||||
continue;
|
||||
|
||||
rx_ring = &priv->rx_rings[index];
|
||||
@@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
|
||||
|
||||
/* Check Tx priority queue interrupts */
|
||||
for (index = 0; index < priv->hw_params->tx_queues; index++) {
|
||||
if (!(priv->irq1_stat & BIT(index)))
|
||||
if (!(status & BIT(index)))
|
||||
continue;
|
||||
|
||||
tx_ring = &priv->tx_rings[index];
|
||||
@@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
|
||||
struct bcmgenet_priv *priv = dev_id;
|
||||
struct bcmgenet_rx_ring *rx_ring;
|
||||
struct bcmgenet_tx_ring *tx_ring;
|
||||
unsigned int status;
|
||||
unsigned long flags;
|
||||
|
||||
/* Save irq status for bottom-half processing. */
|
||||
priv->irq0_stat =
|
||||
bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
|
||||
/* Read irq status */
|
||||
status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
|
||||
~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
|
||||
|
||||
/* clear interrupts */
|
||||
bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
|
||||
bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
|
||||
|
||||
netif_dbg(priv, intr, priv->dev,
|
||||
"IRQ=0x%x\n", priv->irq0_stat);
|
||||
"IRQ=0x%x\n", status);
|
||||
|
||||
if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
|
||||
if (status & UMAC_IRQ_RXDMA_DONE) {
|
||||
rx_ring = &priv->rx_rings[DESC_INDEX];
|
||||
|
||||
if (likely(napi_schedule_prep(&rx_ring->napi))) {
|
||||
@@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
|
||||
}
|
||||
}
|
||||
|
||||
if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
|
||||
if (status & UMAC_IRQ_TXDMA_DONE) {
|
||||
tx_ring = &priv->tx_rings[DESC_INDEX];
|
||||
|
||||
if (likely(napi_schedule_prep(&tx_ring->napi))) {
|
||||
@@ -2561,20 +2634,21 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
|
||||
}
|
||||
}
|
||||
|
||||
if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
|
||||
UMAC_IRQ_PHY_DET_F |
|
||||
UMAC_IRQ_LINK_EVENT |
|
||||
UMAC_IRQ_HFB_SM |
|
||||
UMAC_IRQ_HFB_MM |
|
||||
UMAC_IRQ_MPD_R)) {
|
||||
/* all other interested interrupts handled in bottom half */
|
||||
schedule_work(&priv->bcmgenet_irq_work);
|
||||
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
|
||||
status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
|
||||
wake_up(&priv->wq);
|
||||
}
|
||||
|
||||
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
|
||||
priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
|
||||
priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
|
||||
wake_up(&priv->wq);
|
||||
/* all other interested interrupts handled in bottom half */
|
||||
status &= (UMAC_IRQ_LINK_EVENT |
|
||||
UMAC_IRQ_MPD_R);
|
||||
if (status) {
|
||||
/* Save irq status for bottom-half processing. */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
priv->irq0_stat |= status;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
schedule_work(&priv->bcmgenet_irq_work);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -2801,6 +2875,8 @@ err_irq0:
|
||||
err_fini_dma:
|
||||
bcmgenet_fini_dma(priv);
|
||||
err_clk_disable:
|
||||
if (priv->internal_phy)
|
||||
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
|
||||
clk_disable_unprepare(priv->clk);
|
||||
return ret;
|
||||
}
|
||||
@@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
|
||||
*/
|
||||
gphy_rev = reg & 0xffff;
|
||||
|
||||
/* This is reserved so should require special treatment */
|
||||
if (gphy_rev == 0 || gphy_rev == 0x01ff) {
|
||||
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
|
||||
return;
|
||||
}
|
||||
|
||||
/* This is the good old scheme, just GPHY major, no minor nor patch */
|
||||
if ((gphy_rev & 0xf0) != 0)
|
||||
priv->gphy_rev = gphy_rev << 8;
|
||||
@@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
|
||||
else if ((gphy_rev & 0xff00) != 0)
|
||||
priv->gphy_rev = gphy_rev;
|
||||
|
||||
/* This is reserved so should require special treatment */
|
||||
else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
|
||||
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PHYS_ADDR_T_64BIT
|
||||
if (!(params->flags & GENET_HAS_40BITS))
|
||||
pr_warn("GENET does not support 40-bits PA\n");
|
||||
@@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
|
||||
const void *macaddr;
|
||||
struct resource *r;
|
||||
int err = -EIO;
|
||||
const char *phy_mode_str;
|
||||
|
||||
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
|
||||
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
|
||||
@@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
|
||||
goto err;
|
||||
}
|
||||
|
||||
spin_lock_init(&priv->lock);
|
||||
|
||||
SET_NETDEV_DEV(dev, &pdev->dev);
|
||||
dev_set_drvdata(&pdev->dev, dev);
|
||||
ether_addr_copy(dev->dev_addr, macaddr);
|
||||
@@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
|
||||
priv->clk_eee = NULL;
|
||||
}
|
||||
|
||||
/* If this is an internal GPHY, power it on now, before UniMAC is
|
||||
* brought out of reset as absolutely no UniMAC activity is allowed
|
||||
*/
|
||||
if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
|
||||
!strcasecmp(phy_mode_str, "internal"))
|
||||
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
|
||||
|
||||
err = reset_umac(priv);
|
||||
if (err)
|
||||
goto err_clk_disable;
|
||||
@@ -3502,6 +3588,8 @@ static int bcmgenet_resume(struct device *d)
|
||||
return 0;
|
||||
|
||||
out_clk_disable:
|
||||
if (priv->internal_phy)
|
||||
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
|
||||
clk_disable_unprepare(priv->clk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Broadcom Corporation
|
||||
* Copyright (c) 2014-2017 Broadcom
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
|
||||
#define MDIO_REG_SHIFT 16
|
||||
#define MDIO_REG_MASK 0x1F
|
||||
|
||||
#define UMAC_RBUF_OVFL_CNT 0x61C
|
||||
#define UMAC_RBUF_OVFL_CNT_V1 0x61C
|
||||
#define RBUF_OVFL_CNT_V2 0x80
|
||||
#define RBUF_OVFL_CNT_V3PLUS 0x94
|
||||
|
||||
#define UMAC_MPD_CTRL 0x620
|
||||
#define MPD_EN (1 << 0)
|
||||
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
|
||||
|
||||
#define UMAC_MPD_PW_MS 0x624
|
||||
#define UMAC_MPD_PW_LS 0x628
|
||||
#define UMAC_RBUF_ERR_CNT 0x634
|
||||
#define UMAC_RBUF_ERR_CNT_V1 0x634
|
||||
#define RBUF_ERR_CNT_V2 0x84
|
||||
#define RBUF_ERR_CNT_V3PLUS 0x98
|
||||
#define UMAC_MDF_ERR_CNT 0x638
|
||||
#define UMAC_MDF_CTRL 0x650
|
||||
#define UMAC_MDF_ADDR 0x654
|
||||
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
|
||||
struct work_struct bcmgenet_irq_work;
|
||||
int irq0;
|
||||
int irq1;
|
||||
unsigned int irq0_stat;
|
||||
unsigned int irq1_stat;
|
||||
int wol_irq;
|
||||
bool wol_irq_disabled;
|
||||
|
||||
/* shared status */
|
||||
spinlock_t lock;
|
||||
unsigned int irq0_stat;
|
||||
|
||||
/* HW descriptors/checksum variables */
|
||||
bool desc_64b_en;
|
||||
bool desc_rxchk_en;
|
||||
|
||||
@@ -152,7 +152,7 @@ struct octnic_gather {
|
||||
*/
|
||||
struct octeon_sg_entry *sg;
|
||||
|
||||
u64 sg_dma_ptr;
|
||||
dma_addr_t sg_dma_ptr;
|
||||
};
|
||||
|
||||
struct handshake {
|
||||
@@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
|
||||
struct octnic_gather *g;
|
||||
int i;
|
||||
|
||||
kfree(lio->glist_lock);
|
||||
lio->glist_lock = NULL;
|
||||
|
||||
if (!lio->glist)
|
||||
return;
|
||||
|
||||
@@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
|
||||
do {
|
||||
g = (struct octnic_gather *)
|
||||
list_delete_head(&lio->glist[i]);
|
||||
if (g) {
|
||||
if (g->sg) {
|
||||
dma_unmap_single(&lio->oct_dev->
|
||||
pci_dev->dev,
|
||||
g->sg_dma_ptr,
|
||||
g->sg_size,
|
||||
DMA_TO_DEVICE);
|
||||
kfree((void *)((unsigned long)g->sg -
|
||||
g->adjust));
|
||||
}
|
||||
if (g)
|
||||
kfree(g);
|
||||
}
|
||||
} while (g);
|
||||
|
||||
if (lio->glists_virt_base && lio->glists_virt_base[i]) {
|
||||
lio_dma_free(lio->oct_dev,
|
||||
lio->glist_entry_size * lio->tx_qsize,
|
||||
lio->glists_virt_base[i],
|
||||
lio->glists_dma_base[i]);
|
||||
}
|
||||
}
|
||||
|
||||
kfree((void *)lio->glist);
|
||||
kfree((void *)lio->glist_lock);
|
||||
kfree(lio->glists_virt_base);
|
||||
lio->glists_virt_base = NULL;
|
||||
|
||||
kfree(lio->glists_dma_base);
|
||||
lio->glists_dma_base = NULL;
|
||||
|
||||
kfree(lio->glist);
|
||||
lio->glist = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
|
||||
lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
|
||||
GFP_KERNEL);
|
||||
if (!lio->glist_lock)
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
|
||||
lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
|
||||
GFP_KERNEL);
|
||||
if (!lio->glist) {
|
||||
kfree((void *)lio->glist_lock);
|
||||
return 1;
|
||||
kfree(lio->glist_lock);
|
||||
lio->glist_lock = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
lio->glist_entry_size =
|
||||
ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
|
||||
|
||||
/* allocate memory to store virtual and dma base address of
|
||||
* per glist consistent memory
|
||||
*/
|
||||
lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
|
||||
GFP_KERNEL);
|
||||
lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!lio->glists_virt_base || !lio->glists_dma_base) {
|
||||
delete_glists(lio);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_iqs; i++) {
|
||||
@@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
|
||||
|
||||
INIT_LIST_HEAD(&lio->glist[i]);
|
||||
|
||||
lio->glists_virt_base[i] =
|
||||
lio_dma_alloc(oct,
|
||||
lio->glist_entry_size * lio->tx_qsize,
|
||||
&lio->glists_dma_base[i]);
|
||||
|
||||
if (!lio->glists_virt_base[i]) {
|
||||
delete_glists(lio);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (j = 0; j < lio->tx_qsize; j++) {
|
||||
g = kzalloc_node(sizeof(*g), GFP_KERNEL,
|
||||
numa_node);
|
||||
@@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
|
||||
if (!g)
|
||||
break;
|
||||
|
||||
g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
|
||||
OCT_SG_ENTRY_SIZE);
|
||||
g->sg = lio->glists_virt_base[i] +
|
||||
(j * lio->glist_entry_size);
|
||||
|
||||
g->sg = kmalloc_node(g->sg_size + 8,
|
||||
GFP_KERNEL, numa_node);
|
||||
if (!g->sg)
|
||||
g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
|
||||
if (!g->sg) {
|
||||
kfree(g);
|
||||
break;
|
||||
}
|
||||
|
||||
/* The gather component should be aligned on 64-bit
|
||||
* boundary
|
||||
*/
|
||||
if (((unsigned long)g->sg) & 7) {
|
||||
g->adjust = 8 - (((unsigned long)g->sg) & 7);
|
||||
g->sg = (struct octeon_sg_entry *)
|
||||
((unsigned long)g->sg + g->adjust);
|
||||
}
|
||||
g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
|
||||
g->sg, g->sg_size,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(&oct->pci_dev->dev,
|
||||
g->sg_dma_ptr)) {
|
||||
kfree((void *)((unsigned long)g->sg -
|
||||
g->adjust));
|
||||
kfree(g);
|
||||
break;
|
||||
}
|
||||
g->sg_dma_ptr = lio->glists_dma_base[i] +
|
||||
(j * lio->glist_entry_size);
|
||||
|
||||
list_add_tail(&g->list, &lio->glist[i]);
|
||||
}
|
||||
|
||||
if (j != lio->tx_qsize) {
|
||||
delete_glists(lio);
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
|
||||
i++;
|
||||
}
|
||||
|
||||
dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
|
||||
g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
|
||||
|
||||
iq = skb_iq(lio, skb);
|
||||
spin_lock(&lio->glist_lock[iq]);
|
||||
list_add_tail(&g->list, &lio->glist[iq]);
|
||||
@@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
|
||||
i++;
|
||||
}
|
||||
|
||||
dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
|
||||
g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
|
||||
|
||||
iq = skb_iq(lio, skb);
|
||||
|
||||
spin_lock(&lio->glist_lock[iq]);
|
||||
@@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
i++;
|
||||
}
|
||||
|
||||
dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
|
||||
g->sg_size, DMA_TO_DEVICE);
|
||||
dptr = g->sg_dma_ptr;
|
||||
|
||||
if (OCTEON_CN23XX_PF(oct))
|
||||
|
||||
@@ -108,6 +108,8 @@ struct octnic_gather {
|
||||
* received from the IP layer.
|
||||
*/
|
||||
struct octeon_sg_entry *sg;
|
||||
|
||||
dma_addr_t sg_dma_ptr;
|
||||
};
|
||||
|
||||
struct octeon_device_priv {
|
||||
@@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
|
||||
struct octnic_gather *g;
|
||||
int i;
|
||||
|
||||
kfree(lio->glist_lock);
|
||||
lio->glist_lock = NULL;
|
||||
|
||||
if (!lio->glist)
|
||||
return;
|
||||
|
||||
@@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
|
||||
do {
|
||||
g = (struct octnic_gather *)
|
||||
list_delete_head(&lio->glist[i]);
|
||||
if (g) {
|
||||
if (g->sg)
|
||||
kfree((void *)((unsigned long)g->sg -
|
||||
g->adjust));
|
||||
if (g)
|
||||
kfree(g);
|
||||
}
|
||||
} while (g);
|
||||
|
||||
if (lio->glists_virt_base && lio->glists_virt_base[i]) {
|
||||
lio_dma_free(lio->oct_dev,
|
||||
lio->glist_entry_size * lio->tx_qsize,
|
||||
lio->glists_virt_base[i],
|
||||
lio->glists_dma_base[i]);
|
||||
}
|
||||
}
|
||||
|
||||
kfree(lio->glists_virt_base);
|
||||
lio->glists_virt_base = NULL;
|
||||
|
||||
kfree(lio->glists_dma_base);
|
||||
lio->glists_dma_base = NULL;
|
||||
|
||||
kfree(lio->glist);
|
||||
kfree(lio->glist_lock);
|
||||
lio->glist = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
|
||||
lio->glist_lock =
|
||||
kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
|
||||
if (!lio->glist_lock)
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
|
||||
lio->glist =
|
||||
kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
|
||||
if (!lio->glist) {
|
||||
kfree(lio->glist_lock);
|
||||
return 1;
|
||||
lio->glist_lock = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
lio->glist_entry_size =
|
||||
ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
|
||||
|
||||
/* allocate memory to store virtual and dma base address of
|
||||
* per glist consistent memory
|
||||
*/
|
||||
lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
|
||||
GFP_KERNEL);
|
||||
lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!lio->glists_virt_base || !lio->glists_dma_base) {
|
||||
delete_glists(lio);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_iqs; i++) {
|
||||
@@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
|
||||
|
||||
INIT_LIST_HEAD(&lio->glist[i]);
|
||||
|
||||
lio->glists_virt_base[i] =
|
||||
lio_dma_alloc(lio->oct_dev,
|
||||
lio->glist_entry_size * lio->tx_qsize,
|
||||
&lio->glists_dma_base[i]);
|
||||
|
||||
if (!lio->glists_virt_base[i]) {
|
||||
delete_glists(lio);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (j = 0; j < lio->tx_qsize; j++) {
|
||||
g = kzalloc(sizeof(*g), GFP_KERNEL);
|
||||
if (!g)
|
||||
break;
|
||||
|
||||
g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
|
||||
OCT_SG_ENTRY_SIZE);
|
||||
g->sg = lio->glists_virt_base[i] +
|
||||
(j * lio->glist_entry_size);
|
||||
|
||||
g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
|
||||
if (!g->sg) {
|
||||
kfree(g);
|
||||
break;
|
||||
}
|
||||
g->sg_dma_ptr = lio->glists_dma_base[i] +
|
||||
(j * lio->glist_entry_size);
|
||||
|
||||
/* The gather component should be aligned on 64-bit
|
||||
* boundary
|
||||
*/
|
||||
if (((unsigned long)g->sg) & 7) {
|
||||
g->adjust = 8 - (((unsigned long)g->sg) & 7);
|
||||
g->sg = (struct octeon_sg_entry *)
|
||||
((unsigned long)g->sg + g->adjust);
|
||||
}
|
||||
list_add_tail(&g->list, &lio->glist[i]);
|
||||
}
|
||||
|
||||
if (j != lio->tx_qsize) {
|
||||
delete_glists(lio);
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
|
||||
i++;
|
||||
}
|
||||
|
||||
dma_unmap_single(&lio->oct_dev->pci_dev->dev,
|
||||
finfo->dptr, g->sg_size,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
iq = skb_iq(lio, skb);
|
||||
|
||||
spin_lock(&lio->glist_lock[iq]);
|
||||
@@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
|
||||
i++;
|
||||
}
|
||||
|
||||
dma_unmap_single(&lio->oct_dev->pci_dev->dev,
|
||||
finfo->dptr, g->sg_size,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
iq = skb_iq(lio, skb);
|
||||
|
||||
spin_lock(&lio->glist_lock[iq]);
|
||||
@@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
i++;
|
||||
}
|
||||
|
||||
dptr = dma_map_single(&oct->pci_dev->dev,
|
||||
g->sg, g->sg_size,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
|
||||
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
|
||||
__func__);
|
||||
dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
|
||||
skb->len - skb->data_len,
|
||||
DMA_TO_DEVICE);
|
||||
for (j = 1; j <= frags; j++) {
|
||||
frag = &skb_shinfo(skb)->frags[j - 1];
|
||||
dma_unmap_page(&oct->pci_dev->dev,
|
||||
g->sg[j >> 2].ptr[j & 3],
|
||||
frag->size, DMA_TO_DEVICE);
|
||||
}
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
dptr = g->sg_dma_ptr;
|
||||
|
||||
ndata.cmd.cmd3.dptr = dptr;
|
||||
finfo->dptr = dptr;
|
||||
|
||||
@@ -71,17 +71,17 @@
|
||||
#define CN23XX_MAX_RINGS_PER_VF 8
|
||||
|
||||
#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
|
||||
#define CN23XX_MAX_IQ_DESCRIPTORS 2048
|
||||
#define CN23XX_MAX_IQ_DESCRIPTORS 512
|
||||
#define CN23XX_DB_MIN 1
|
||||
#define CN23XX_DB_MAX 8
|
||||
#define CN23XX_DB_TIMEOUT 1
|
||||
|
||||
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
|
||||
#define CN23XX_MAX_OQ_DESCRIPTORS 2048
|
||||
#define CN23XX_MAX_OQ_DESCRIPTORS 512
|
||||
#define CN23XX_OQ_BUF_SIZE 1536
|
||||
#define CN23XX_OQ_PKTSPER_INTR 128
|
||||
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
|
||||
#define CN23XX_OQ_REFIL_THRESHOLD 128
|
||||
#define CN23XX_OQ_REFIL_THRESHOLD 16
|
||||
|
||||
#define CN23XX_OQ_INTR_PKT 64
|
||||
#define CN23XX_OQ_INTR_TIME 100
|
||||
|
||||
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
|
||||
recv_buffer_destroy(droq->recv_buf_list[i].buffer,
|
||||
pg_info);
|
||||
|
||||
if (droq->desc_ring && droq->desc_ring[i].info_ptr)
|
||||
lio_unmap_ring_info(oct->pci_dev,
|
||||
(u64)droq->
|
||||
desc_ring[i].info_ptr,
|
||||
OCT_DROQ_INFO_SIZE);
|
||||
droq->recv_buf_list[i].buffer = NULL;
|
||||
}
|
||||
|
||||
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
|
||||
vfree(droq->recv_buf_list);
|
||||
|
||||
if (droq->info_base_addr)
|
||||
cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
|
||||
droq->info_alloc_size,
|
||||
droq->info_base_addr,
|
||||
droq->info_list_dma);
|
||||
lio_free_info_buffer(oct, droq);
|
||||
|
||||
if (droq->desc_ring)
|
||||
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
|
||||
@@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
|
||||
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
|
||||
droq->max_count);
|
||||
|
||||
droq->info_list =
|
||||
cnnic_numa_alloc_aligned_dma((droq->max_count *
|
||||
OCT_DROQ_INFO_SIZE),
|
||||
&droq->info_alloc_size,
|
||||
&droq->info_base_addr,
|
||||
numa_node);
|
||||
droq->info_list = lio_alloc_info_buffer(oct, droq);
|
||||
if (!droq->info_list) {
|
||||
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
|
||||
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user