Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix NAPI poll list corruption in enic driver, from Christian
Lamparter.
2) Fix route use after free, from Eric Dumazet.
3) Fix regression in reuseaddr handling, from Josef Bacik.
4) Assert the size of control messages in compat handling since we copy
it in from userspace twice. From Meng Xu.
5) SMC layer bug fixes (missing RCU locking, bad refcounting, etc.)
from Ursula Braun.
6) Fix races in AF_PACKET fanout handling, from Willem de Bruijn.
7) Don't use ARRAY_SIZE on spinlock array which might have zero
entries, from Geert Uytterhoeven.
8) Fix miscomputation of checksum in ipv6 udp code, from Subash Abhinov
Kasiviswanathan.
9) Push the ipv6 header properly in ipv6 GRE tunnel driver, from Xin
Long.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (75 commits)
inet: fix improper empty comparison
net: use inet6_rcv_saddr to compare sockets
net: set tb->fast_sk_family
net: orphan frags on stand-alone ptype in dev_queue_xmit_nit
MAINTAINERS: update git tree locations for ieee802154 subsystem
net: prevent dst uses after free
net: phy: Fix truncation of large IRQ numbers in phy_attached_print()
net/smc: no close wait in case of process shut down
net/smc: introduce a delay
net/smc: terminate link group if out-of-sync is received
net/smc: longer delay for client link group removal
net/smc: adapt send request completion notification
net/smc: adjust net_device refcount
net/smc: take RCU read lock for routing cache lookup
net/smc: add receive timeout check
net/smc: add missing dev_put
net: stmmac: Cocci spatch "of_table"
lan78xx: Use default values loaded from EEPROM/OTP after reset
lan78xx: Allow EEPROM write for less than MAX_EEPROM_SIZE
lan78xx: Fix for eeprom read/write when device auto suspend
...
@@ -1680,6 +1680,9 @@ accept_dad - INTEGER
	2: Enable DAD, and disable IPv6 operation if MAC-based duplicate
	   link-local address has been found.

	DAD operation and mode on a given interface will be selected according
	to the maximum value of conf/{all,interface}/accept_dad.

force_tllao - BOOLEAN
	Enable sending the target link-layer address option even when
	responding to a unicast neighbor solicitation.
@@ -1727,16 +1730,23 @@ suppress_frag_ndisc - INTEGER

optimistic_dad - BOOLEAN
	Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
	0: disabled (default)
	1: enabled

	Optimistic Duplicate Address Detection for the interface will be enabled
	if at least one of conf/{all,interface}/optimistic_dad is set to 1,
	it will be disabled otherwise.

use_optimistic - BOOLEAN
	If enabled, do not classify optimistic addresses as deprecated during
	source address selection. Preferred addresses will still be chosen
	before optimistic addresses, subject to other ranking in the source
	address selection algorithm.
	0: disabled (default)
	1: enabled

	This will be enabled if at least one of
	conf/{all,interface}/use_optimistic is set to 1, disabled otherwise.
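For illustration only (not part of this patch): a minimal user-space sketch, in C, that turns on the two knobs documented above by writing the conf/all entries under /proc. Per the text above, setting either conf/all or the per-interface entry to 1 is enough.

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* enable optimistic DAD and optimistic source-address selection */
	write_sysctl("/proc/sys/net/ipv6/conf/all/optimistic_dad", "1");
	write_sysctl("/proc/sys/net/ipv6/conf/all/use_optimistic", "1");
	return 0;
}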
stable_secret - IPv6 address
	This IPv6 address will be used as a secret to generate IPv6
@@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip. Other setups
with SR-IOV or soft switches, such as OVS, are possible.

[Fig 1 - ASCII diagram whose alignment was lost in this view. It shows
user-space tools talking to the kernel over Netlink; below that sit the
network stack (Linux), the switch driver (this document) and a separate
mgmt driver; these attach over the HW bus (eg PCI) to the switch device
(sw1), whose offloaded data path and mgmt port fan out to the front-panel
ports p1-p6, exposed to the stack as sw1p1-sw1p6 plus eth1.]
@@ -2865,7 +2865,6 @@ S: Supported
F: drivers/scsi/bnx2i/

BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <ariel.elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
@@ -6655,8 +6654,8 @@ M: Alexander Aring <alex.aring@gmail.com>
M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
S: Maintained
F: net/ieee802154/
F: net/mac802154/
@@ -11059,7 +11058,6 @@ S: Supported
F: drivers/scsi/qedi/

QLOGIC QL4xxx ETHERNET DRIVER
M: Yuval Mintz <Yuval.Mintz@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
	isdn_net_local *lp;
	struct ippp_struct *is;
	int proto;
	unsigned char protobuf[4];

	is = file->private_data;

@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
	if (!lp)
		printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
	else {
		/*
		 * Don't reset huptimer for
		 * LCP packets. (Echo requests).
		 */
		if (copy_from_user(protobuf, buf, 4))
			return -EFAULT;
		proto = PPP_PROTOCOL(protobuf);
		if (proto != PPP_LCP)
			lp->huptimer = 0;
		if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
			unsigned char protobuf[4];
			/*
			 * Don't reset huptimer for
			 * LCP packets. (Echo requests).
			 */
			if (copy_from_user(protobuf, buf, 4))
				return -EFAULT;

			proto = PPP_PROTOCOL(protobuf);
			if (proto != PPP_LCP)
				lp->huptimer = 0;

			if (lp->isdn_device < 0 || lp->isdn_channel < 0)
				return 0;
		}

		if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
		    lp->dialstate == 0 &&
		    (lp->flags & ISDN_NET_CONNECTED)) {
			unsigned short hl;
			struct sk_buff *skb;
			unsigned char *cpy_buf;
			/*
			 * we need to reserve enough space in front of
			 * sk_buff. old call to dev_alloc_skb only reserved
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
				return count;
			}
			skb_reserve(skb, hl);
			if (copy_from_user(skb_put(skb, count), buf, count))
			cpy_buf = skb_put(skb, count);
			if (copy_from_user(cpy_buf, buf, count))
			{
				kfree_skb(skb);
				return -EFAULT;
			}

			/*
			 * Don't reset huptimer for
			 * LCP packets. (Echo requests).
			 */
			proto = PPP_PROTOCOL(cpy_buf);
			if (proto != PPP_LCP)
				lp->huptimer = 0;

			if (is->debug & 0x40) {
				printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
				isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
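The net effect of the isdn_ppp_write() hunks above is that user data is copied into the skb exactly once, and the PPP protocol word is then read back from that kernel-side copy. A hedged, generic sketch of the same single-copy pattern (the helper name and context are illustrative, not from the driver):

#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>

/* Copy 'count' bytes from user space into a freshly allocated skb.  skb_put()
 * returns the destination pointer, and any later inspection (for example of a
 * protocol field) uses that kernel copy instead of re-reading user memory.
 */
static struct sk_buff *copy_user_frame(const char __user *buf, size_t count,
				       unsigned int headroom)
{
	struct sk_buff *skb;
	unsigned char *cpy_buf;

	skb = alloc_skb(count + headroom, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, headroom);
	cpy_buf = skb_put(skb, count);
	if (copy_from_user(cpy_buf, buf, count)) {
		kfree_skb(skb);
		return ERR_PTR(-EFAULT);
	}

	return skb;
}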
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev,
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev))
	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev,
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64))
		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		else
		} else
			data[i] = *(u32 *)p;
		j++;
	}
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_packets = 0, tx_bytes = 0;
	unsigned int start;
	unsigned int q;

	netdev_stats_to_stats64(stats, &dev->stats);

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			tx_bytes = ring->bytes;
			tx_packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		stats->tx_bytes += tx_bytes;
		stats->tx_packets += tx_packets;
	}

	stats64->tx_bytes = stats->tx_bytes;
	stats64->tx_packets = stats->tx_packets;
	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
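Both bcm_sysport hunks above read the per-ring 64-bit counters inside a u64_stats_sync fetch/retry loop, so the bytes/packets pair is sampled consistently even on 32-bit machines. A generic sketch of that reader idiom (structure and function names are illustrative only, not part of the patch):

#include <linux/u64_stats_sync.h>

struct example_ring {
	u64 bytes;
	u64 packets;
};

/* Snapshot one ring's counters under the writer's u64_stats_sync. */
static void example_ring_snapshot(const struct example_ring *ring,
				  struct u64_stats_sync *syncp,
				  u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*bytes = ring->bytes;
		*packets = ring->packets;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}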
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
	int rc = 0;

	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
@@ -374,8 +374,8 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL	((uint)0x00010000)
#define FEC_ENET_TS_TIMER	((uint)0x00008000)

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
#define FEC_NAPI_IMASK	(FEC_ENET_MII | FEC_ENET_TS_TIMER)
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
#define FEC_NAPI_IMASK	FEC_ENET_MII
#define FEC_RX_DISABLED_IMASK	(FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))

/* ENET interrupt coalescing macro define */

@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
	if (int_events & FEC_ENET_RXF_0)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
	if (int_events & FEC_ENET_TXF_0)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id)
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	if (fec_ptp_check_pps_event(fep))
		ret = IRQ_HANDLED;
	return ret;
}

@@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
|
||||
}
|
||||
|
||||
static int hnae3_match_n_instantiate(struct hnae3_client *client,
|
||||
struct hnae3_ae_dev *ae_dev,
|
||||
bool is_reg, bool *matched)
|
||||
struct hnae3_ae_dev *ae_dev, bool is_reg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
*matched = false;
|
||||
|
||||
/* check if this client matches the type of ae_dev */
|
||||
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
|
||||
hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
|
||||
return 0;
|
||||
}
|
||||
/* there is a match of client and dev */
|
||||
*matched = true;
|
||||
|
||||
/* now, (un-)instantiate client by calling lower layer */
|
||||
if (is_reg) {
|
||||
@@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client)
|
||||
{
|
||||
struct hnae3_client *client_tmp;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
bool matched;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
@@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client)
|
||||
/* if the client could not be initialized on current port, for
|
||||
* any error reasons, move on to next available port
|
||||
*/
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched);
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true);
|
||||
if (ret)
|
||||
dev_err(&ae_dev->pdev->dev,
|
||||
"match and instantiation failed for port\n");
|
||||
@@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client);
|
||||
void hnae3_unregister_client(struct hnae3_client *client)
|
||||
{
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
bool matched;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
/* un-initialize the client on every matched port */
|
||||
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
|
||||
hnae3_match_n_instantiate(client, ae_dev, false, &matched);
|
||||
hnae3_match_n_instantiate(client, ae_dev, false);
|
||||
}
|
||||
|
||||
list_del(&client->node);
|
||||
@@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
@@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
* initialize the figure out client instance
|
||||
*/
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true,
|
||||
&matched);
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true);
|
||||
if (ret)
|
||||
dev_err(&ae_dev->pdev->dev,
|
||||
"match and instantiation failed\n");
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
/* Check if there are matched ae_dev */
|
||||
@@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
|
||||
/* check the client list for the match with this ae_dev type and
|
||||
* un-initialize the figure out client instance
|
||||
*/
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
hnae3_match_n_instantiate(client, ae_dev, false,
|
||||
&matched);
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
list_for_each_entry(client, &hnae3_client_list, node)
|
||||
hnae3_match_n_instantiate(client, ae_dev, false);
|
||||
|
||||
ae_algo->ops->uninit_ae_dev(ae_dev);
|
||||
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
|
||||
@@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_algo *ae_algo;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
@@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
* initialize the figure out client instance
|
||||
*/
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true,
|
||||
&matched);
|
||||
ret = hnae3_match_n_instantiate(client, ae_dev, true);
|
||||
if (ret)
|
||||
dev_err(&ae_dev->pdev->dev,
|
||||
"match and instantiation failed\n");
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
|
||||
out_err:
|
||||
@@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
const struct pci_device_id *id;
|
||||
struct hnae3_ae_algo *ae_algo;
|
||||
struct hnae3_client *client;
|
||||
bool matched;
|
||||
|
||||
mutex_lock(&hnae3_common_lock);
|
||||
/* Check if there are matched ae_algo */
|
||||
@@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
if (!id)
|
||||
continue;
|
||||
|
||||
list_for_each_entry(client, &hnae3_client_list, node) {
|
||||
hnae3_match_n_instantiate(client, ae_dev, false,
|
||||
&matched);
|
||||
if (matched)
|
||||
break;
|
||||
}
|
||||
list_for_each_entry(client, &hnae3_client_list, node)
|
||||
hnae3_match_n_instantiate(client, ae_dev, false);
|
||||
|
||||
ae_algo->ops->uninit_ae_dev(ae_dev);
|
||||
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
|
||||
|
||||
@@ -49,7 +49,17 @@
|
||||
#define HNAE3_CLASS_NAME_SIZE 16
|
||||
|
||||
#define HNAE3_DEV_INITED_B 0x0
|
||||
#define HNAE_DEV_SUPPORT_ROCE_B 0x1
|
||||
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
|
||||
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
|
||||
|
||||
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
|
||||
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
|
||||
|
||||
#define hnae3_dev_roce_supported(hdev) \
|
||||
hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
|
||||
|
||||
#define hnae3_dev_dcb_supported(hdev) \
|
||||
hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
|
||||
|
||||
#define ring_ptr_move_fw(ring, p) \
|
||||
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
|
||||
@@ -366,12 +376,12 @@ struct hnae3_ae_algo {
|
||||
struct hnae3_tc_info {
|
||||
u16 tqp_offset; /* TQP offset from base TQP */
|
||||
u16 tqp_count; /* Total TQPs */
|
||||
u8 up; /* user priority */
|
||||
u8 tc; /* TC index */
|
||||
bool enable; /* If this TC is enable or not */
|
||||
};
|
||||
|
||||
#define HNAE3_MAX_TC 8
|
||||
#define HNAE3_MAX_USER_PRIO 8
|
||||
struct hnae3_knic_private_info {
|
||||
struct net_device *netdev; /* Set by KNIC client when init instance */
|
||||
u16 rss_size; /* Allocated RSS queues */
|
||||
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
|
||||
u16 num_desc;
|
||||
|
||||
u8 num_tc; /* Total number of enabled TCs */
|
||||
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
|
||||
struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
|
||||
|
||||
u16 num_tqps; /* total number of TQPs in this handle */
|
||||
|
||||
@@ -238,7 +238,7 @@ struct hclge_tqp_map {
|
||||
u8 rsv[18];
|
||||
};
|
||||
|
||||
#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11
|
||||
#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
|
||||
|
||||
enum hclge_int_type {
|
||||
HCLGE_INT_TX,
|
||||
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
|
||||
#define HCLGE_INT_TYPE_S 0
|
||||
#define HCLGE_INT_TYPE_M 0x3
|
||||
#define HCLGE_TQP_ID_S 2
|
||||
#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S)
|
||||
#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
|
||||
#define HCLGE_INT_GL_IDX_S 13
|
||||
#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
|
||||
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
|
||||
u8 vfid;
|
||||
u8 rsv;
|
||||
};
|
||||
|
||||
#define HCLGE_TC_NUM 8
|
||||
@@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc {
|
||||
|
||||
struct hclge_rx_priv_buff {
|
||||
__le16 buf_num[HCLGE_TC_NUM];
|
||||
u8 rsv[8];
|
||||
__le16 shared_buf;
|
||||
u8 rsv[6];
|
||||
};
|
||||
|
||||
struct hclge_query_version {
|
||||
@@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue {
|
||||
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
|
||||
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
|
||||
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
|
||||
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
|
||||
|
||||
#define HCLGE_TYPE_CRQ 0
|
||||
#define HCLGE_TYPE_CSQ 1
|
||||
|
||||
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
|
||||
/* Required last entry */
|
||||
{0, }
|
||||
};
|
||||
|
||||
static const struct pci_device_id roce_pci_tbl[] = {
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
|
||||
/* Required last entry */
|
||||
/* required last entry */
|
||||
{0, }
|
||||
};
|
||||
|
||||
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
|
||||
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
|
||||
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
|
||||
|
||||
if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
|
||||
if (hnae3_dev_roce_supported(hdev)) {
|
||||
hdev->num_roce_msix =
|
||||
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
|
||||
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
|
||||
@@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev)
|
||||
hdev->base_tqp_pid = 0;
|
||||
hdev->rss_size_max = 1;
|
||||
hdev->rx_buf_len = cfg.rx_buf_len;
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
|
||||
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
|
||||
hdev->hw.mac.media_type = cfg.media_type;
|
||||
hdev->hw.mac.phy_addr = cfg.phy_addr;
|
||||
hdev->num_desc = cfg.tqp_desc_num;
|
||||
hdev->tm_info.num_pg = 1;
|
||||
hdev->tm_info.num_tc = cfg.tc_num;
|
||||
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
|
||||
tc_num = hclge_get_tc_num(hdev);
|
||||
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
|
||||
|
||||
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
|
||||
if (hnae3_dev_dcb_supported(hdev))
|
||||
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
|
||||
else
|
||||
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
|
||||
|
||||
shared_buf_tc = pfc_enable_num * hdev->mps +
|
||||
(tc_num - pfc_enable_num) * hdev->mps / 2 +
|
||||
hdev->mps;
|
||||
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
|
||||
struct hclge_priv_buf *priv;
|
||||
int i;
|
||||
|
||||
/* When DCB is not supported, rx private
|
||||
* buffer is not allocated.
|
||||
*/
|
||||
if (!hnae3_dev_dcb_supported(hdev)) {
|
||||
if (!hclge_is_rx_buf_ok(hdev, rx_all))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* step 1, try to alloc private buffer for all enabled tc */
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
priv = &hdev->priv_buf[i];
|
||||
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
|
||||
priv->wl.high = 2 * hdev->mps;
|
||||
priv->buf_size = priv->wl.high;
|
||||
}
|
||||
} else {
|
||||
priv->enable = 0;
|
||||
priv->wl.low = 0;
|
||||
priv->wl.high = 0;
|
||||
priv->buf_size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
priv = &hdev->priv_buf[i];
|
||||
|
||||
if (hdev->hw_tc_map & BIT(i))
|
||||
priv->enable = 1;
|
||||
priv->enable = 0;
|
||||
priv->wl.low = 0;
|
||||
priv->wl.high = 0;
|
||||
priv->buf_size = 0;
|
||||
|
||||
if (!(hdev->hw_tc_map & BIT(i)))
|
||||
continue;
|
||||
|
||||
priv->enable = 1;
|
||||
|
||||
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
|
||||
priv->wl.low = 128;
|
||||
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
|
||||
cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
|
||||
}
|
||||
|
||||
req->shared_buf =
|
||||
cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
|
||||
(1 << HCLGE_TC0_PRI_BUF_EN_B));
|
||||
|
||||
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_rx_priv_wl_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure rx private waterline %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
if (hnae3_dev_dcb_supported(hdev)) {
|
||||
ret = hclge_rx_priv_wl_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure rx private waterline %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_common_thrd_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure common threshold %d\n", ret);
|
||||
return ret;
|
||||
ret = hclge_common_thrd_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"could not configure common threshold %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hclge_common_wl_config(hdev);
|
||||
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
|
||||
u16 tc_valid[HCLGE_MAX_TC_NUM];
|
||||
u16 tc_size[HCLGE_MAX_TC_NUM];
|
||||
u32 *rss_indir = NULL;
|
||||
u16 rss_size = 0, roundup_size;
|
||||
const u8 *key;
|
||||
int i, ret, j;
|
||||
|
||||
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
|
||||
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
|
||||
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
|
||||
vport[j].rss_indirection_tbl[i] =
|
||||
i % hdev->rss_size_max;
|
||||
i % vport[j].alloc_rss_size;
|
||||
|
||||
/* vport 0 is for PF */
|
||||
if (j != 0)
|
||||
continue;
|
||||
|
||||
rss_size = vport[j].alloc_rss_size;
|
||||
rss_indir[i] = vport[j].rss_indirection_tbl[i];
|
||||
}
|
||||
}
|
||||
@@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
if (hdev->hw_tc_map & BIT(i))
|
||||
tc_valid[i] = 1;
|
||||
else
|
||||
tc_valid[i] = 0;
|
||||
|
||||
switch (hdev->rss_size_max) {
|
||||
case HCLGE_RSS_TC_SIZE_0:
|
||||
tc_size[i] = 0;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_1:
|
||||
tc_size[i] = 1;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_2:
|
||||
tc_size[i] = 2;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_3:
|
||||
tc_size[i] = 3;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_4:
|
||||
tc_size[i] = 4;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_5:
|
||||
tc_size[i] = 5;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_6:
|
||||
tc_size[i] = 6;
|
||||
break;
|
||||
case HCLGE_RSS_TC_SIZE_7:
|
||||
tc_size[i] = 7;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
tc_offset[i] = hdev->rss_size_max * i;
|
||||
/* Each TC have the same queue size, and tc_size set to hardware is
|
||||
* the log2 of roundup power of two of rss_size, the acutal queue
|
||||
* size is limited by indirection table.
|
||||
*/
|
||||
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"Configure rss tc size failed, invalid TC_SIZE = %d\n",
|
||||
rss_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
roundup_size = roundup_pow_of_two(rss_size);
|
||||
roundup_size = ilog2(roundup_size);
|
||||
|
||||
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
|
||||
tc_valid[i] = 0;
|
||||
|
||||
if (!(hdev->hw_tc_map & BIT(i)))
|
||||
continue;
|
||||
|
||||
tc_valid[i] = 1;
|
||||
tc_size[i] = roundup_size;
|
||||
tc_offset[i] = rss_size * i;
|
||||
}
|
||||
|
||||
ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
|
||||
|
||||
err:
|
||||
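For reference, the replacement hunk above derives the per-TC size value from rss_size instead of the old hard-coded switch: the value programmed to hardware is the log2 of rss_size rounded up to a power of two, with the actual queue count still limited by the indirection table. A minimal sketch of that computation (the helper name is illustrative, not from the driver):

#include <linux/log2.h>
#include <linux/types.h>

/* e.g. rss_size = 24 -> roundup_pow_of_two(24) = 32 -> tc_size = 5 */
static u16 example_rss_tc_size(u16 rss_size)
{
	return ilog2(roundup_pow_of_two(rss_size));
}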
@@ -2679,7 +2699,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
|
||||
HCLGE_TQP_ID_S, node->tqp_index);
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
|
||||
HCLGE_INT_GL_IDX_S,
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
|
||||
req->vfid = vport->vport_id;
|
||||
|
||||
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
|
||||
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
|
||||
@@ -2763,8 +2787,12 @@ static int hclge_unmap_ring_from_vector(
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
|
||||
HCLGE_TQP_ID_S, node->tqp_index);
|
||||
hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
|
||||
HCLGE_INT_GL_IDX_S,
|
||||
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
|
||||
|
||||
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
|
||||
req->vfid = vport->vport_id;
|
||||
|
||||
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
|
||||
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
|
||||
@@ -2778,7 +2806,7 @@ static int hclge_unmap_ring_from_vector(
|
||||
}
|
||||
i = 0;
|
||||
hclge_cmd_setup_basic_desc(&desc,
|
||||
HCLGE_OPC_ADD_RING_TO_VECTOR,
|
||||
HCLGE_OPC_DEL_RING_TO_VECTOR,
|
||||
false);
|
||||
req->int_vector_id = vector_id;
|
||||
}
|
||||
@@ -3665,6 +3693,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
|
||||
{
|
||||
#define HCLGE_VLAN_TYPE_VF_TABLE 0
|
||||
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
|
||||
struct hnae3_handle *handle;
|
||||
int ret;
|
||||
|
||||
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
|
||||
@@ -3674,8 +3703,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
|
||||
|
||||
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
|
||||
true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return ret;
|
||||
handle = &hdev->vport[0].nic;
|
||||
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
|
||||
}
|
||||
|
||||
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
|
||||
@@ -3920,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
|
||||
goto err;
|
||||
|
||||
if (hdev->roce_client &&
|
||||
hnae_get_bit(hdev->ae_dev->flag,
|
||||
HNAE_DEV_SUPPORT_ROCE_B)) {
|
||||
hnae3_dev_roce_supported(hdev)) {
|
||||
struct hnae3_client *rc = hdev->roce_client;
|
||||
|
||||
ret = hclge_init_roce_base_info(vport);
|
||||
@@ -3944,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
|
||||
|
||||
break;
|
||||
case HNAE3_CLIENT_ROCE:
|
||||
if (hnae_get_bit(hdev->ae_dev->flag,
|
||||
HNAE_DEV_SUPPORT_ROCE_B)) {
|
||||
if (hnae3_dev_roce_supported(hdev)) {
|
||||
hdev->roce_client = client;
|
||||
vport->roce.client = client;
|
||||
}
|
||||
@@ -4057,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
|
||||
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
{
|
||||
struct pci_dev *pdev = ae_dev->pdev;
|
||||
const struct pci_device_id *id;
|
||||
struct hclge_dev *hdev;
|
||||
int ret;
|
||||
|
||||
@@ -4072,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
hdev->ae_dev = ae_dev;
|
||||
ae_dev->priv = hdev;
|
||||
|
||||
id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
|
||||
if (id)
|
||||
hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
|
||||
|
||||
ret = hclge_pci_init(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "PCI init failed\n");
|
||||
@@ -4138,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_rss_init_hw(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_init_vlan_config(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
|
||||
@@ -4156,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = hclge_rss_init_hw(hdev);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
setup_timer(&hdev->service_timer, hclge_service_timer,
|
||||
(unsigned long)hdev);
|
||||
INIT_WORK(&hdev->service_task, hclge_service_task);
|
||||
|
||||
@@ -176,7 +176,6 @@ struct hclge_pg_info {
|
||||
struct hclge_tc_info {
|
||||
u8 tc_id;
|
||||
u8 tc_sch_mode; /* 0: sp; 1: dwrr */
|
||||
u8 up;
|
||||
u8 pgid;
|
||||
u32 bw_limit;
|
||||
};
|
||||
@@ -197,6 +196,7 @@ struct hclge_tm_info {
|
||||
u8 num_tc;
|
||||
u8 num_pg; /* It must be 1 if vNET-Base schd */
|
||||
u8 pg_dwrr[HCLGE_PG_NUM];
|
||||
u8 prio_tc[HNAE3_MAX_USER_PRIO];
|
||||
struct hclge_pg_info pg_info[HCLGE_PG_NUM];
|
||||
struct hclge_tc_info tc_info[HNAE3_MAX_TC];
|
||||
enum hclge_fc_mode fc_mode;
|
||||
@@ -477,6 +477,7 @@ struct hclge_vport {
|
||||
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
|
||||
/* User configured lookup table entries */
|
||||
u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
|
||||
u16 alloc_rss_size;
|
||||
|
||||
u16 qs_offset;
|
||||
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
|
||||
|
||||
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
|
||||
{
|
||||
u8 tc;
|
||||
|
||||
for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
|
||||
if (hdev->tm_info.tc_info[tc].up == pri_id)
|
||||
break;
|
||||
tc = hdev->tm_info.prio_tc[pri_id];
|
||||
|
||||
if (tc >= hdev->tm_info.num_tc)
|
||||
return -EINVAL;
|
||||
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
|
||||
|
||||
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
|
||||
|
||||
for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
|
||||
for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
|
||||
ret = hclge_fill_pri_array(hdev, pri, pri_id);
|
||||
if (ret)
|
||||
return ret;
|
||||
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
|
||||
|
||||
shap_cfg_cmd->pg_id = pg_id;
|
||||
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
|
||||
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
|
||||
|
||||
shap_cfg_cmd->pri_id = pri_id;
|
||||
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
|
||||
hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
|
||||
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->num_tqps / kinfo->num_tc);
|
||||
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
|
||||
vport->dwrr = 100; /* 100 percent as init */
|
||||
vport->alloc_rss_size = kinfo->rss_size;
|
||||
|
||||
for (i = 0; i < kinfo->num_tc; i++) {
|
||||
if (hdev->hw_tc_map & BIT(i)) {
|
||||
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
|
||||
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
|
||||
kinfo->tc_info[i].tc = i;
|
||||
kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
|
||||
} else {
|
||||
/* Set to default queue if TC is disable */
|
||||
kinfo->tc_info[i].enable = false;
|
||||
kinfo->tc_info[i].tqp_offset = 0;
|
||||
kinfo->tc_info[i].tqp_count = 1;
|
||||
kinfo->tc_info[i].tc = 0;
|
||||
kinfo->tc_info[i].up = 0;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
|
||||
FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
|
||||
}
|
||||
|
||||
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
|
||||
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
hdev->tm_info.tc_info[i].tc_id = i;
|
||||
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
|
||||
hdev->tm_info.tc_info[i].up = i;
|
||||
hdev->tm_info.tc_info[i].pgid = 0;
|
||||
hdev->tm_info.tc_info[i].bw_limit =
|
||||
hdev->tm_info.pg_info[0].bw_limit;
|
||||
}
|
||||
|
||||
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
|
||||
hdev->tm_info.prio_tc[i] =
|
||||
(i >= hdev->tm_info.num_tc) ? 0 : i;
|
||||
|
||||
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
|
||||
}
|
||||
|
||||
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Only DCB-supported dev supports qset back pressure setting */
|
||||
if (!hnae3_dev_dcb_supported(hdev))
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
ret = hclge_tm_qs_bp_cfg(hdev, i);
|
||||
if (ret)
|
||||
|
||||
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
|
||||
u32 rsvd1;
|
||||
};
|
||||
|
||||
#define hclge_tm_set_feild(dest, string, val) \
|
||||
#define hclge_tm_set_field(dest, string, val) \
|
||||
hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
|
||||
(HCLGE_TM_SHAP_##string##_LSH), val)
|
||||
#define hclge_tm_get_feild(src, string) \
|
||||
#define hclge_tm_get_field(src, string) \
|
||||
hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
|
||||
(HCLGE_TM_SHAP_##string##_LSH))
|
||||
|
||||
|
||||
@@ -41,11 +41,16 @@ static struct hnae3_client client;
|
||||
static const struct pci_device_id hns3_pci_tbl[] = {
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
|
||||
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
|
||||
/* required last entry */
|
||||
{0, }
|
||||
};
|
||||
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
}
|
||||
|
||||
ae_dev->pdev = pdev;
|
||||
ae_dev->flag = ent->driver_data;
|
||||
ae_dev->dev_type = HNAE3_DEV_KNIC;
|
||||
pci_set_drvdata(pdev, ae_dev);
|
||||
|
||||
@@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev)
|
||||
eth_hw_addr_random(netdev);
|
||||
dev_warn(priv->dev, "using random MAC address %pM\n",
|
||||
netdev->dev_addr);
|
||||
/* Also copy this new MAC address into hdev */
|
||||
if (h->ae_algo->ops->set_mac_addr)
|
||||
h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
|
||||
}
|
||||
|
||||
if (h->ae_algo->ops->set_mac_addr)
|
||||
h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
|
||||
|
||||
}
|
||||
|
||||
static void hns3_nic_set_priv_ops(struct net_device *netdev)
|
||||
|
||||
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
|
||||
unsigned long flags;
|
||||
|
||||
MAL_DBG2(mal, "poll(%d)" NL, budget);
|
||||
again:
|
||||
|
||||
/* Process TX skbs */
|
||||
list_for_each(l, &mal->poll_list) {
|
||||
struct mal_commac *mc =
|
||||
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
|
||||
spin_lock_irqsave(&mal->lock, flags);
|
||||
mal_disable_eob_irq(mal);
|
||||
spin_unlock_irqrestore(&mal->lock, flags);
|
||||
goto again;
|
||||
}
|
||||
mc->ops->poll_tx(mc->dev);
|
||||
}
|
||||
|
||||
@@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data)
|
||||
static int emac_get_sset_count(struct net_device *netdev, int sset)
|
||||
{
|
||||
switch (sset) {
|
||||
case ETH_SS_PRIV_FLAGS:
|
||||
return 1;
|
||||
case ETH_SS_STATS:
|
||||
return EMAC_STATS_LEN;
|
||||
default:
|
||||
@@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
|
||||
unsigned int i;
|
||||
|
||||
switch (stringset) {
|
||||
case ETH_SS_PRIV_FLAGS:
|
||||
strcpy(data, "single-pause-mode");
|
||||
break;
|
||||
|
||||
case ETH_SS_STATS:
|
||||
for (i = 0; i < EMAC_STATS_LEN; i++) {
|
||||
strlcpy(data, emac_ethtool_stat_strings[i],
|
||||
@@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev)
|
||||
return EMAC_MAX_REG_SIZE * sizeof(u32);
|
||||
}
|
||||
|
||||
#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0)
|
||||
|
||||
static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
|
||||
{
|
||||
struct emac_adapter *adpt = netdev_priv(netdev);
|
||||
|
||||
adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
|
||||
|
||||
if (netif_running(netdev))
|
||||
return emac_reinit_locked(adpt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 emac_get_priv_flags(struct net_device *netdev)
|
||||
{
|
||||
struct emac_adapter *adpt = netdev_priv(netdev);
|
||||
|
||||
return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
|
||||
}
|
||||
|
||||
static const struct ethtool_ops emac_ethtool_ops = {
|
||||
.get_link_ksettings = phy_ethtool_get_link_ksettings,
|
||||
.set_link_ksettings = phy_ethtool_set_link_ksettings,
|
||||
@@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = {
|
||||
|
||||
.get_regs_len = emac_get_regs_len,
|
||||
.get_regs = emac_get_regs,
|
||||
|
||||
.set_priv_flags = emac_set_priv_flags,
|
||||
.get_priv_flags = emac_get_priv_flags,
|
||||
};
|
||||
|
||||
void emac_set_ethtool_ops(struct net_device *netdev)
|
||||
|
||||
@@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt)
|
||||
mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
|
||||
DEBUG_MODE | SINGLE_PAUSE_MODE);
|
||||
|
||||
/* Enable single-pause-frame mode if requested.
|
||||
*
|
||||
* If enabled, the EMAC will send a single pause frame when the RX
|
||||
* queue is full. This normally leads to packet loss because
|
||||
* the pause frame disables the remote MAC only for 33ms (the quanta),
|
||||
* and then the remote MAC continues sending packets even though
|
||||
* the RX queue is still full.
|
||||
*
|
||||
* If disabled, the EMAC sends a pause frame every 31ms until the RX
|
||||
* queue is no longer full. Normally, this is the preferred
|
||||
* method of operation. However, when the system is hung (e.g.
|
||||
* cores are halted), the EMAC interrupt handler is never called
|
||||
* and so the RX queue fills up quickly and stays full. The resuling
|
||||
* non-stop "flood" of pause frames sometimes has the effect of
|
||||
* disabling nearby switches. In some cases, other nearby switches
|
||||
* are also affected, shutting down the entire network.
|
||||
*
|
||||
* The user can enable or disable single-pause-frame mode
|
||||
* via ethtool.
|
||||
*/
|
||||
mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
|
||||
|
||||
writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
|
||||
|
||||
writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
|
||||
|
||||
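Since single-pause-frame mode is exposed as ethtool private flag bit 0 (string "single-pause-mode" in emac_get_strings() above), it can be toggled from user space, e.g. with ethtool --set-priv-flags. A hedged user-space sketch using the raw ETHTOOL_SPFLAGS ioctl; the interface name is only an example:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	/* bit 0 is the driver's only private flag: "single-pause-mode" */
	struct ethtool_value eval = { .cmd = ETHTOOL_SPFLAGS, .data = 1 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;
	ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return 0;
}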
@@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
|
||||
|
||||
/* default to automatic flow control */
|
||||
adpt->automatic = true;
|
||||
|
||||
/* Disable single-pause-frame mode by default */
|
||||
adpt->single_pause_mode = false;
|
||||
}
|
||||
|
||||
/* Get the clock */
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.