mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'net-5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Mostly driver fixes.
Current release - regressions:
- Revert "net: Add a second bind table hashed by port and address",
needs more work
- amd-xgbe: use platform_irq_count(), static setup of IRQ resources
had been removed from DT core
- dts: at91: ksz9477_evb: add phy-mode to fix port/phy validation
Current release - new code bugs:
- hns3: modify the ring param print info
Previous releases - always broken:
- axienet: make the 64b addressable DMA depends on 64b architectures
- iavf: fix issue with MAC address of VF shown as zero
- ice: fix PTP TX timestamp offset calculation
- usb: ax88179_178a needs FLAG_SEND_ZLP
Misc:
- document some net.sctp.* sysctls"
* tag 'net-5.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (31 commits)
net: axienet: add missing error return code in axienet_probe()
Revert "net: Add a second bind table hashed by port and address"
net: ax25: Fix deadlock caused by skb_recv_datagram in ax25_recvmsg
net: usb: ax88179_178a needs FLAG_SEND_ZLP
MAINTAINERS: add include/dt-bindings/net to NETWORKING DRIVERS
ARM: dts: at91: ksz9477_evb: fix port/phy validation
net: bgmac: Fix an erroneous kfree() in bgmac_remove()
ice: Fix memory corruption in VF driver
ice: Fix queue config fail handling
ice: Sync VLAN filtering features for DVM
ice: Fix PTP TX timestamp offset calculation
mlxsw: spectrum_cnt: Reorder counter pools
docs: networking: phy: Fix a typo
amd-xgbe: Use platform_irq_count()
octeontx2-vf: Add support for adaptive interrupt coalescing
xilinx: Fix build on x86.
net: axienet: Use iowrite64 to write all 64b descriptor pointers
net: axienet: make the 64b addressable DMA depends on 64b architectures
net: hns3: fix tm port shaping of fibre port is incorrect after driver initialization
net: hns3: fix PF rss size initialization bug
...
This commit is contained in:
@@ -2925,6 +2925,43 @@ plpmtud_probe_interval - INTEGER
|
||||
|
||||
Default: 0
|
||||
|
||||
reconf_enable - BOOLEAN
|
||||
Enable or disable extension of Stream Reconfiguration functionality
|
||||
specified in RFC6525. This extension provides the ability to "reset"
|
||||
a stream, and it includes the Parameters of "Outgoing/Incoming SSN
|
||||
Reset", "SSN/TSN Reset" and "Add Outgoing/Incoming Streams".
|
||||
|
||||
- 1: Enable extension.
|
||||
- 0: Disable extension.
|
||||
|
||||
Default: 0
|
||||
|
||||
intl_enable - BOOLEAN
|
||||
Enable or disable extension of User Message Interleaving functionality
|
||||
specified in RFC8260. This extension allows the interleaving of user
|
||||
messages sent on different streams. With this feature enabled, I-DATA
|
||||
chunk will replace DATA chunk to carry user messages if also supported
|
||||
by the peer. Note that to use this feature, one needs to set this option
|
||||
to 1 and also needs to set socket options SCTP_FRAGMENT_INTERLEAVE to 2
|
||||
and SCTP_INTERLEAVING_SUPPORTED to 1.
|
||||
|
||||
- 1: Enable extension.
|
||||
- 0: Disable extension.
|
||||
|
||||
Default: 0
|
||||
|
||||
ecn_enable - BOOLEAN
|
||||
Control use of Explicit Congestion Notification (ECN) by SCTP.
|
||||
Like in TCP, ECN is used only when both ends of the SCTP connection
|
||||
indicate support for it. This feature is useful in avoiding losses
|
||||
due to congestion by allowing supporting routers to signal congestion
|
||||
before having to drop packets.
|
||||
|
||||
1: Enable ecn.
|
||||
0: Disable ecn.
|
||||
|
||||
Default: 1
|
||||
|
||||
|
||||
``/proc/sys/net/core/*``
|
||||
========================
|
||||
|
||||
@@ -104,7 +104,7 @@ Whenever possible, use the PHY side RGMII delay for these reasons:
|
||||
|
||||
* PHY device drivers in PHYLIB being reusable by nature, being able to
|
||||
configure correctly a specified delay enables more designs with similar delay
|
||||
requirements to be operate correctly
|
||||
requirements to be operated correctly
|
||||
|
||||
For cases where the PHY is not capable of providing this delay, but the
|
||||
Ethernet MAC driver is capable of doing so, the correct phy_interface_t value
|
||||
|
||||
@@ -13800,6 +13800,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
|
||||
F: Documentation/devicetree/bindings/net/
|
||||
F: drivers/connector/
|
||||
F: drivers/net/
|
||||
F: include/dt-bindings/net/
|
||||
F: include/linux/etherdevice.h
|
||||
F: include/linux/fcdevice.h
|
||||
F: include/linux/fddidevice.h
|
||||
|
||||
@@ -120,26 +120,31 @@
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
label = "lan1";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
label = "lan2";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@2 {
|
||||
reg = <2>;
|
||||
label = "lan3";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@3 {
|
||||
reg = <3>;
|
||||
label = "lan4";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@4 {
|
||||
reg = <4>;
|
||||
label = "lan5";
|
||||
phy-mode = "internal";
|
||||
};
|
||||
|
||||
port@5 {
|
||||
|
||||
@@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
|
||||
* the PHY resources listed last
|
||||
*/
|
||||
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
|
||||
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
|
||||
phy_irqnum = platform_irq_count(pdev) - 1;
|
||||
dma_irqnum = 1;
|
||||
dma_irqend = phy_irqnum;
|
||||
} else {
|
||||
@@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
|
||||
phy_memnum = 0;
|
||||
phy_irqnum = 0;
|
||||
dma_irqnum = 1;
|
||||
dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
|
||||
dma_irqend = platform_irq_count(pdev);
|
||||
}
|
||||
|
||||
/* Obtain the mmio areas for the device */
|
||||
|
||||
@@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
|
||||
bcma_mdio_mii_unregister(bgmac->mii_bus);
|
||||
bgmac_enet_remove(bgmac);
|
||||
bcma_set_drvdata(core, NULL);
|
||||
kfree(bgmac);
|
||||
}
|
||||
|
||||
static struct bcma_driver bgmac_bcma_driver = {
|
||||
|
||||
@@ -769,6 +769,7 @@ struct hnae3_tc_info {
|
||||
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
|
||||
u16 tqp_count[HNAE3_MAX_TC];
|
||||
u16 tqp_offset[HNAE3_MAX_TC];
|
||||
u8 max_tc; /* Total number of TCs */
|
||||
u8 num_tc; /* Total number of enabled TCs */
|
||||
bool mqprio_active;
|
||||
};
|
||||
|
||||
@@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev,
|
||||
if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
|
||||
old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
|
||||
old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
|
||||
netdev_info(ndev, "ringparam not changed\n");
|
||||
netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
|
||||
static int hclge_update_port_info(struct hclge_dev *hdev)
|
||||
{
|
||||
struct hclge_mac *mac = &hdev->hw.mac;
|
||||
int speed = HCLGE_MAC_SPEED_UNKNOWN;
|
||||
int speed;
|
||||
int ret;
|
||||
|
||||
/* get the port info from SFP cmd if not copper port */
|
||||
@@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
|
||||
if (!hdev->support_sfp_query)
|
||||
return 0;
|
||||
|
||||
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
|
||||
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
|
||||
speed = mac->speed;
|
||||
ret = hclge_get_sfp_info(hdev, mac);
|
||||
else
|
||||
} else {
|
||||
speed = HCLGE_MAC_SPEED_UNKNOWN;
|
||||
ret = hclge_get_sfp_speed(hdev, &speed);
|
||||
}
|
||||
|
||||
if (ret == -EOPNOTSUPP) {
|
||||
hdev->support_sfp_query = false;
|
||||
@@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
|
||||
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
|
||||
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
|
||||
hclge_update_port_capability(hdev, mac);
|
||||
if (mac->speed != speed)
|
||||
(void)hclge_tm_port_shaper_cfg(hdev);
|
||||
return 0;
|
||||
}
|
||||
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
|
||||
@@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
|
||||
link_state_old = vport->vf_info.link_state;
|
||||
vport->vf_info.link_state = link_state;
|
||||
|
||||
/* return success directly if the VF is unalive, VF will
|
||||
* query link state itself when it starts work.
|
||||
*/
|
||||
if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
|
||||
return 0;
|
||||
|
||||
ret = hclge_push_vf_link_status(vport);
|
||||
if (ret) {
|
||||
vport->vf_info.link_state = link_state_old;
|
||||
@@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
vport->port_base_vlan_cfg.tbl_sta = false;
|
||||
/* remove old VLAN tag */
|
||||
if (old_info->vlan_tag == 0)
|
||||
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
|
||||
|
||||
@@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
|
||||
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
|
||||
u16 qs_id, u8 pri)
|
||||
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
|
||||
bool link_vld)
|
||||
{
|
||||
struct hclge_qs_to_pri_link_cmd *map;
|
||||
struct hclge_desc desc;
|
||||
@@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
|
||||
|
||||
map->qs_id = cpu_to_le16(qs_id);
|
||||
map->priority = pri;
|
||||
map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
|
||||
map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
|
||||
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
@@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
|
||||
return hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
}
|
||||
|
||||
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
|
||||
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
|
||||
{
|
||||
struct hclge_port_shapping_cmd *shap_cfg_cmd;
|
||||
struct hclge_shaper_ir_para ir_para;
|
||||
@@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
|
||||
* one tc for VF for simplicity. VF's vport_id is non zero.
|
||||
*/
|
||||
if (vport->vport_id) {
|
||||
kinfo->tc_info.max_tc = 1;
|
||||
kinfo->tc_info.num_tc = 1;
|
||||
vport->qs_offset = HNAE3_MAX_TC +
|
||||
vport->vport_id - HCLGE_VF_VPORT_START_NUM;
|
||||
vport_max_rss_size = hdev->vf_rss_size_max;
|
||||
} else {
|
||||
kinfo->tc_info.max_tc = hdev->tc_max;
|
||||
kinfo->tc_info.num_tc =
|
||||
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
|
||||
vport->qs_offset = 0;
|
||||
@@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
|
||||
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
|
||||
vport->dwrr = 100; /* 100 percent as init */
|
||||
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
|
||||
hdev->rss_cfg.rss_size = kinfo->rss_size;
|
||||
|
||||
if (vport->vport_id == PF_VPORT_ID)
|
||||
hdev->rss_cfg.rss_size = kinfo->rss_size;
|
||||
|
||||
/* when enable mqprio, the tc_info has been updated. */
|
||||
if (kinfo->tc_info.mqprio_active)
|
||||
@@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
|
||||
|
||||
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
|
||||
{
|
||||
u8 i;
|
||||
u8 i, tc_sch_mode;
|
||||
u32 bw_limit;
|
||||
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
if (i < hdev->tm_info.num_tc) {
|
||||
tc_sch_mode = HCLGE_SCH_MODE_DWRR;
|
||||
bw_limit = hdev->tm_info.pg_info[0].bw_limit;
|
||||
} else {
|
||||
tc_sch_mode = HCLGE_SCH_MODE_SP;
|
||||
bw_limit = 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
hdev->tm_info.tc_info[i].tc_id = i;
|
||||
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
|
||||
hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
|
||||
hdev->tm_info.tc_info[i].pgid = 0;
|
||||
hdev->tm_info.tc_info[i].bw_limit =
|
||||
hdev->tm_info.pg_info[0].bw_limit;
|
||||
hdev->tm_info.tc_info[i].bw_limit = bw_limit;
|
||||
}
|
||||
|
||||
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
|
||||
@@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
|
||||
for (k = 0; k < hdev->num_alloc_vport; k++) {
|
||||
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
|
||||
|
||||
for (i = 0; i < kinfo->tc_info.num_tc; i++) {
|
||||
for (i = 0; i < kinfo->tc_info.max_tc; i++) {
|
||||
u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
|
||||
bool link_vld = i < kinfo->tc_info.num_tc;
|
||||
|
||||
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
|
||||
vport[k].qs_offset + i,
|
||||
i);
|
||||
pri, link_vld);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
|
||||
for (i = 0; i < HNAE3_MAX_TC; i++) {
|
||||
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
|
||||
vport[k].qs_offset + i,
|
||||
k);
|
||||
k, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
|
||||
{
|
||||
u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
|
||||
struct hclge_shaper_ir_para ir_para;
|
||||
u32 shaper_para;
|
||||
u32 shaper_para_c, shaper_para_p;
|
||||
int ret;
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
u32 rate = hdev->tm_info.tc_info[i].bw_limit;
|
||||
|
||||
ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
|
||||
&ir_para, max_tm_rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (rate) {
|
||||
ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
|
||||
&ir_para, max_tm_rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
|
||||
ir_para.ir_u,
|
||||
ir_para.ir_s,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
} else {
|
||||
shaper_para_c = 0;
|
||||
shaper_para_p = 0;
|
||||
}
|
||||
|
||||
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
|
||||
shaper_para, rate);
|
||||
shaper_para_c, rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
|
||||
ir_para.ir_u,
|
||||
ir_para.ir_s,
|
||||
HCLGE_SHAPER_BS_U_DEF,
|
||||
HCLGE_SHAPER_BS_S_DEF);
|
||||
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
|
||||
shaper_para, rate);
|
||||
shaper_para_p, rate);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
|
||||
int ret;
|
||||
u32 i, k;
|
||||
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
pg_info =
|
||||
&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
|
||||
dwrr = pg_info->tc_dwrr[i];
|
||||
@@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
|
||||
return ret;
|
||||
|
||||
for (k = 0; k < hdev->num_alloc_vport; k++) {
|
||||
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
|
||||
|
||||
if (i >= kinfo->tc_info.max_tc)
|
||||
continue;
|
||||
|
||||
dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
|
||||
ret = hclge_tm_qs_weight_cfg(
|
||||
hdev, vport[k].qs_offset + i,
|
||||
vport[k].dwrr);
|
||||
dwrr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
|
||||
{
|
||||
struct hclge_vport *vport = hdev->vport;
|
||||
int ret;
|
||||
u8 mode;
|
||||
u16 i;
|
||||
|
||||
ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
|
||||
@@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < hdev->num_alloc_vport; i++) {
|
||||
struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
|
||||
|
||||
if (pri_id >= kinfo->tc_info.max_tc)
|
||||
continue;
|
||||
|
||||
mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
|
||||
HCLGE_SCH_MODE_SP;
|
||||
ret = hclge_tm_qs_schd_mode_cfg(hdev,
|
||||
vport[i].qs_offset + pri_id,
|
||||
HCLGE_SCH_MODE_DWRR);
|
||||
mode);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
|
||||
u8 i;
|
||||
|
||||
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
|
||||
for (i = 0; i < hdev->tm_info.num_tc; i++) {
|
||||
for (i = 0; i < hdev->tc_max; i++) {
|
||||
ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
|
||||
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
|
||||
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
|
||||
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
|
||||
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
|
||||
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
|
||||
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
|
||||
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
|
||||
|
||||
@@ -2586,15 +2586,16 @@ static void i40e_diag_test(struct net_device *netdev,
|
||||
|
||||
set_bit(__I40E_TESTING, pf->state);
|
||||
|
||||
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
|
||||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Cannot start offline testing when PF is in reset state.\n");
|
||||
goto skip_ol_tests;
|
||||
}
|
||||
|
||||
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
|
||||
dev_warn(&pf->pdev->dev,
|
||||
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
|
||||
data[I40E_ETH_TEST_REG] = 1;
|
||||
data[I40E_ETH_TEST_EEPROM] = 1;
|
||||
data[I40E_ETH_TEST_INTR] = 1;
|
||||
data[I40E_ETH_TEST_LINK] = 1;
|
||||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
clear_bit(__I40E_TESTING, pf->state);
|
||||
goto skip_ol_tests;
|
||||
}
|
||||
|
||||
@@ -2641,9 +2642,17 @@ static void i40e_diag_test(struct net_device *netdev,
|
||||
data[I40E_ETH_TEST_INTR] = 0;
|
||||
}
|
||||
|
||||
skip_ol_tests:
|
||||
|
||||
netif_info(pf, drv, netdev, "testing finished\n");
|
||||
return;
|
||||
|
||||
skip_ol_tests:
|
||||
data[I40E_ETH_TEST_REG] = 1;
|
||||
data[I40E_ETH_TEST_EEPROM] = 1;
|
||||
data[I40E_ETH_TEST_INTR] = 1;
|
||||
data[I40E_ETH_TEST_LINK] = 1;
|
||||
eth_test->flags |= ETH_TEST_FL_FAILED;
|
||||
clear_bit(__I40E_TESTING, pf->state);
|
||||
netif_info(pf, drv, netdev, "testing failed\n");
|
||||
}
|
||||
|
||||
static void i40e_get_wol(struct net_device *netdev,
|
||||
|
||||
@@ -8542,6 +8542,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (!tc) {
|
||||
dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
|
||||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
|
||||
return -EBUSY;
|
||||
|
||||
@@ -2282,7 +2282,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
||||
}
|
||||
|
||||
if (vf->adq_enabled) {
|
||||
for (i = 0; i < I40E_MAX_VF_VSI; i++)
|
||||
for (i = 0; i < vf->num_tc; i++)
|
||||
num_qps_all += vf->ch[i].num_qps;
|
||||
if (num_qps_all != qci->num_queue_pairs) {
|
||||
aq_ret = I40E_ERR_PARAM;
|
||||
|
||||
@@ -984,7 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
|
||||
list_add_tail(&f->list, &adapter->mac_filter_list);
|
||||
f->add = true;
|
||||
f->is_new_mac = true;
|
||||
f->is_primary = false;
|
||||
f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
|
||||
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
|
||||
} else {
|
||||
f->remove = false;
|
||||
|
||||
@@ -5763,25 +5763,38 @@ static netdev_features_t
|
||||
ice_fix_features(struct net_device *netdev, netdev_features_t features)
|
||||
{
|
||||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
netdev_features_t supported_vlan_filtering;
|
||||
netdev_features_t requested_vlan_filtering;
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
netdev_features_t req_vlan_fltr, cur_vlan_fltr;
|
||||
bool cur_ctag, cur_stag, req_ctag, req_stag;
|
||||
|
||||
requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
|
||||
cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
|
||||
cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
|
||||
/* make sure supported_vlan_filtering works for both SVM and DVM */
|
||||
supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
if (ice_is_dvm_ena(&vsi->back->hw))
|
||||
supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
|
||||
req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
|
||||
|
||||
if (requested_vlan_filtering &&
|
||||
requested_vlan_filtering != supported_vlan_filtering) {
|
||||
if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
|
||||
netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
|
||||
features |= supported_vlan_filtering;
|
||||
if (req_vlan_fltr != cur_vlan_fltr) {
|
||||
if (ice_is_dvm_ena(&np->vsi->back->hw)) {
|
||||
if (req_ctag && req_stag) {
|
||||
features |= NETIF_VLAN_FILTERING_FEATURES;
|
||||
} else if (!req_ctag && !req_stag) {
|
||||
features &= ~NETIF_VLAN_FILTERING_FEATURES;
|
||||
} else if ((!cur_ctag && req_ctag && !cur_stag) ||
|
||||
(!cur_stag && req_stag && !cur_ctag)) {
|
||||
features |= NETIF_VLAN_FILTERING_FEATURES;
|
||||
netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
|
||||
} else if ((cur_ctag && !req_ctag && cur_stag) ||
|
||||
(cur_stag && !req_stag && cur_ctag)) {
|
||||
features &= ~NETIF_VLAN_FILTERING_FEATURES;
|
||||
netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
|
||||
}
|
||||
} else {
|
||||
netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
|
||||
features &= ~supported_vlan_filtering;
|
||||
if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
|
||||
netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
|
||||
|
||||
if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
|
||||
features |= NETIF_F_HW_VLAN_CTAG_FILTER;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2271,7 +2271,7 @@ static int
|
||||
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
|
||||
{
|
||||
tx->quad = port / ICE_PORTS_PER_QUAD;
|
||||
tx->quad_offset = tx->quad * INDEX_PER_PORT;
|
||||
tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
|
||||
tx->len = INDEX_PER_PORT;
|
||||
|
||||
return ice_ptp_alloc_tx_tracker(tx);
|
||||
|
||||
@@ -49,6 +49,37 @@ struct ice_perout_channel {
|
||||
* To allow multiple ports to access the shared register block independently,
|
||||
* the blocks are split up so that indexes are assigned to each port based on
|
||||
* hardware logical port number.
|
||||
*
|
||||
* The timestamp blocks are handled differently for E810- and E822-based
|
||||
* devices. In E810 devices, each port has its own block of timestamps, while in
|
||||
* E822 there is a need to logically break the block of registers into smaller
|
||||
* chunks based on the port number to avoid collisions.
|
||||
*
|
||||
* Example for port 5 in E810:
|
||||
* +--------+--------+--------+--------+--------+--------+--------+--------+
|
||||
* |register|register|register|register|register|register|register|register|
|
||||
* | block | block | block | block | block | block | block | block |
|
||||
* | for | for | for | for | for | for | for | for |
|
||||
* | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
|
||||
* +--------+--------+--------+--------+--------+--------+--------+--------+
|
||||
* ^^
|
||||
* ||
|
||||
* |--- quad offset is always 0
|
||||
* ---- quad number
|
||||
*
|
||||
* Example for port 5 in E822:
|
||||
* +-----------------------------+-----------------------------+
|
||||
* | register block for quad 0 | register block for quad 1 |
|
||||
* |+------+------+------+------+|+------+------+------+------+|
|
||||
* ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
|
||||
* |+------+------+------+------+|+------+------+------+------+|
|
||||
* +-----------------------------+-------^---------------------+
|
||||
* ^ |
|
||||
* | --- quad offset*
|
||||
* ---- quad number
|
||||
*
|
||||
* * PHY port 5 is port 1 in quad 1
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
|
||||
@@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
|
||||
}
|
||||
|
||||
if (ice_is_vf_disabled(vf)) {
|
||||
vsi = ice_get_vf_vsi(vf);
|
||||
if (WARN_ON(!vsi))
|
||||
return -EINVAL;
|
||||
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
|
||||
ice_vsi_stop_all_rx_rings(vsi);
|
||||
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
|
||||
vf->vf_id);
|
||||
return 0;
|
||||
|
||||
@@ -1569,35 +1569,27 @@ error_param:
|
||||
*/
|
||||
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
{
|
||||
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
|
||||
struct virtchnl_vsi_queue_config_info *qci =
|
||||
(struct virtchnl_vsi_queue_config_info *)msg;
|
||||
struct virtchnl_queue_pair_info *qpi;
|
||||
struct ice_pf *pf = vf->pf;
|
||||
struct ice_vsi *vsi;
|
||||
int i, q_idx;
|
||||
int i = -1, q_idx;
|
||||
|
||||
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
vsi = ice_get_vf_vsi(vf);
|
||||
if (!vsi) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (!vsi)
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
|
||||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
|
||||
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
|
||||
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
@@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
|
||||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
|
||||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
@@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
* for selected "vsi"
|
||||
*/
|
||||
if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
@@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
vsi->tx_rings[i]->count = qpi->txq.ring_len;
|
||||
|
||||
/* Disable any existing queue first */
|
||||
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
/* Configure a queue with the requested settings */
|
||||
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
goto error_param;
|
||||
}
|
||||
}
|
||||
@@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
|
||||
if (qpi->rxq.databuffer_size != 0 &&
|
||||
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
|
||||
qpi->rxq.databuffer_size < 1024)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
qpi->rxq.databuffer_size < 1024))
|
||||
goto error_param;
|
||||
}
|
||||
vsi->rx_buf_len = qpi->rxq.databuffer_size;
|
||||
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
|
||||
if (qpi->rxq.max_pkt_size > max_frame_size ||
|
||||
qpi->rxq.max_pkt_size < 64) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
qpi->rxq.max_pkt_size < 64)
|
||||
goto error_param;
|
||||
}
|
||||
|
||||
vsi->max_frame = qpi->rxq.max_pkt_size;
|
||||
/* add space for the port VLAN since the VF driver is
|
||||
@@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
|
||||
vsi->max_frame += VLAN_HLEN;
|
||||
|
||||
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
|
||||
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
|
||||
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
goto error_param;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
error_param:
|
||||
/* send the response to the VF */
|
||||
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
|
||||
NULL, 0);
|
||||
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
|
||||
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
|
||||
error_param:
|
||||
/* disable whatever we can */
|
||||
for (; i >= 0; i--) {
|
||||
if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
|
||||
dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
|
||||
dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
|
||||
vf->vf_id, i);
|
||||
}
|
||||
|
||||
/* send the response to the VF */
|
||||
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
|
||||
VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user