You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix null deref in xt_TEE netfilter module, from Eric Dumazet.
2) Several spots need to get to the original listener for SYN-ACK
packets, most spots got this ok but some were not. Whilst covering
the remaining cases, create a helper to do this. From Eric Dumazet.
3) Missing check of return value from alloc_netdev() in CAIF SPI code,
from Rasmus Villemoes.
4) Don't sleep while != TASK_RUNNING in macvtap, from Vlad Yasevich.
5) Use after free in mvneta driver, from Justin Maggard.
6) Fix race on dst->flags access in dst_release(), from Eric Dumazet.
7) Add missing ZLIB_INFLATE dependency for new qed driver. From Arnd
Bergmann.
8) Fix multicast getsockopt deadlock, from WANG Cong.
9) Fix deadlock in btusb, from Kuba Pawlak.
10) Some ipv6_add_dev() failure paths were not cleaning up the SNMP6
counter state. From Sabrina Dubroca.
11) Fix packet_bind() race, which can cause lost notifications, from
Francesco Ruggeri.
12) Fix MAC restoration in qlcnic driver during bonding mode changes,
from Jarod Wilson.
13) Revert bridging forward delay change which broke libvirt and other
userspace things, from Vlad Yasevich.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
Revert "bridge: Allow forward delay to be cfgd when STP enabled"
bpf_trace: Make dependent on PERF_EVENTS
qed: select ZLIB_INFLATE
net: fix a race in dst_release()
net: mvneta: Fix memory use after free.
net: Documentation: Fix default value tcp_limit_output_bytes
macvtap: Resolve possible __might_sleep warning in macvtap_do_read()
mvneta: add FIXED_PHY dependency
net: caif: check return value of alloc_netdev
net: hisilicon: NET_VENDOR_HISILICON should depend on HAS_DMA
drivers: net: xgene: fix RGMII 10/100Mb mode
netfilter: nft_meta: use skb_to_full_sk() helper
net_sched: em_meta: use skb_to_full_sk() helper
sched: cls_flow: use skb_to_full_sk() helper
netfilter: xt_owner: use skb_to_full_sk() helper
smack: use skb_to_full_sk() helper
net: add skb_to_full_sk() helper and use it in selinux_netlbl_skbuff_setsid()
bpf: doc: correct arch list for supported eBPF JIT
dwc_eth_qos: Delete an unnecessary check before the function call "of_node_put"
bonding: fix panic on non-ARPHRD_ETHER enslave failure
...
This commit is contained in:
@@ -48,6 +48,11 @@ Optional properties:
|
||||
- mac-address : See ethernet.txt file in the same directory
|
||||
- phy-handle : See ethernet.txt file in the same directory
|
||||
|
||||
Slave sub-nodes:
|
||||
- fixed-link : See fixed-link.txt file in the same directory
|
||||
Either the properties phy_id and phy-mode,
|
||||
or the sub-node fixed-link can be specified
|
||||
|
||||
Note: "ti,hwmods" field is used to fetch the base address and irq
|
||||
resources from TI, omap hwmod data base during device registration.
|
||||
Future plan is to migrate hwmod data base contents into device tree
|
||||
|
||||
@@ -596,9 +596,9 @@ skb pointer). All constraints and restrictions from bpf_check_classic() apply
|
||||
before a conversion to the new layout is being done behind the scenes!
|
||||
|
||||
Currently, the classic BPF format is being used for JITing on most of the
|
||||
architectures. Only x86-64 performs JIT compilation from eBPF instruction set,
|
||||
however, future work will migrate other JIT compilers as well, so that they
|
||||
will profit from the very same benefits.
|
||||
architectures. x86-64, aarch64 and s390x perform JIT compilation from eBPF
|
||||
instruction set, however, future work will migrate other JIT compilers as well,
|
||||
so that they will profit from the very same benefits.
|
||||
|
||||
Some core changes of the new internal format:
|
||||
|
||||
|
||||
@@ -709,7 +709,7 @@ tcp_limit_output_bytes - INTEGER
|
||||
typical pfifo_fast qdiscs.
|
||||
tcp_limit_output_bytes limits the number of bytes on qdisc
|
||||
or device to reduce artificial RTT/cwnd and reduce bufferbloat.
|
||||
Default: 131072
|
||||
Default: 262144
|
||||
|
||||
tcp_challenge_ack_limit - INTEGER
|
||||
Limits number of Challenge ACK sent per second, as recommended
|
||||
|
||||
@@ -1372,6 +1372,8 @@ static void btusb_work(struct work_struct *work)
|
||||
}
|
||||
|
||||
if (data->isoc_altsetting != new_alts) {
|
||||
unsigned long flags;
|
||||
|
||||
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
|
||||
usb_kill_anchored_urbs(&data->isoc_anchor);
|
||||
|
||||
@@ -1384,10 +1386,10 @@ static void btusb_work(struct work_struct *work)
|
||||
* Clear outstanding fragment when selecting a new
|
||||
* alternate setting.
|
||||
*/
|
||||
spin_lock(&data->rxlock);
|
||||
spin_lock_irqsave(&data->rxlock, flags);
|
||||
kfree_skb(data->sco_skb);
|
||||
data->sco_skb = NULL;
|
||||
spin_unlock(&data->rxlock);
|
||||
spin_unlock_irqrestore(&data->rxlock, flags);
|
||||
|
||||
if (__set_isoc_interface(hdev, new_alts) < 0)
|
||||
return;
|
||||
|
||||
@@ -1749,6 +1749,7 @@ err_undo_flags:
|
||||
slave_dev->dev_addr))
|
||||
eth_hw_addr_random(bond_dev);
|
||||
if (bond_dev->type != ARPHRD_ETHER) {
|
||||
dev_close(bond_dev);
|
||||
ether_setup(bond_dev);
|
||||
bond_dev->flags |= IFF_MASTER;
|
||||
bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
||||
|
||||
@@ -730,11 +730,14 @@ int cfspi_spi_probe(struct platform_device *pdev)
|
||||
int res;
|
||||
dev = (struct cfspi_dev *)pdev->dev.platform_data;
|
||||
|
||||
ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
|
||||
NET_NAME_UNKNOWN, cfspi_setup);
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
|
||||
NET_NAME_UNKNOWN, cfspi_setup);
|
||||
if (!ndev)
|
||||
return -ENOMEM;
|
||||
|
||||
cfspi = netdev_priv(ndev);
|
||||
netif_stop_queue(ndev);
|
||||
cfspi->ndev = ndev;
|
||||
|
||||
@@ -103,6 +103,8 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
|
||||
#endif
|
||||
.get_regs_len = mv88e6xxx_get_regs_len,
|
||||
.get_regs = mv88e6xxx_get_regs,
|
||||
.port_join_bridge = mv88e6xxx_port_bridge_join,
|
||||
.port_leave_bridge = mv88e6xxx_port_bridge_leave,
|
||||
.port_stp_update = mv88e6xxx_port_stp_update,
|
||||
.port_pvid_get = mv88e6xxx_port_pvid_get,
|
||||
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
|
||||
|
||||
@@ -323,6 +323,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
|
||||
.set_eeprom = mv88e6352_set_eeprom,
|
||||
.get_regs_len = mv88e6xxx_get_regs_len,
|
||||
.get_regs = mv88e6xxx_get_regs,
|
||||
.port_join_bridge = mv88e6xxx_port_bridge_join,
|
||||
.port_leave_bridge = mv88e6xxx_port_bridge_leave,
|
||||
.port_stp_update = mv88e6xxx_port_stp_update,
|
||||
.port_pvid_get = mv88e6xxx_port_pvid_get,
|
||||
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
|
||||
|
||||
@@ -1462,6 +1462,10 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
|
||||
const struct switchdev_obj_port_vlan *vlan,
|
||||
struct switchdev_trans *trans)
|
||||
{
|
||||
/* We reserve a few VLANs to isolate unbridged ports */
|
||||
if (vlan->vid_end >= 4000)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* We don't need any dynamic resource from the kernel (yet),
|
||||
* so skip the prepare phase.
|
||||
*/
|
||||
@@ -1870,6 +1874,36 @@ unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
|
||||
{
|
||||
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
|
||||
const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
|
||||
int err;
|
||||
|
||||
/* The port joined a bridge, so leave its reserved VLAN */
|
||||
mutex_lock(&ps->smi_mutex);
|
||||
err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
|
||||
if (!err)
|
||||
err = _mv88e6xxx_port_pvid_set(ds, port, 0);
|
||||
mutex_unlock(&ps->smi_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
|
||||
{
|
||||
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
|
||||
const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
|
||||
int err;
|
||||
|
||||
/* The port left the bridge, so join its reserved VLAN */
|
||||
mutex_lock(&ps->smi_mutex);
|
||||
err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
|
||||
if (!err)
|
||||
err = _mv88e6xxx_port_pvid_set(ds, port, pvid);
|
||||
mutex_unlock(&ps->smi_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mv88e6xxx_bridge_work(struct work_struct *work)
|
||||
{
|
||||
struct mv88e6xxx_priv_state *ps;
|
||||
@@ -2140,6 +2174,14 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
|
||||
ret = mv88e6xxx_setup_port(ds, i);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
|
||||
continue;
|
||||
|
||||
/* setup the unbridged state */
|
||||
ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -468,6 +468,8 @@ int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
|
||||
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
|
||||
int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
|
||||
struct phy_device *phydev, struct ethtool_eee *e);
|
||||
int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members);
|
||||
int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members);
|
||||
int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
|
||||
int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
|
||||
const struct switchdev_obj_port_vlan *vlan,
|
||||
|
||||
@@ -459,6 +459,45 @@ static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
|
||||
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
|
||||
}
|
||||
|
||||
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
|
||||
{
|
||||
struct device *dev = &pdata->pdev->dev;
|
||||
|
||||
if (dev->of_node) {
|
||||
struct clk *parent = clk_get_parent(pdata->clk);
|
||||
|
||||
switch (pdata->phy_speed) {
|
||||
case SPEED_10:
|
||||
clk_set_rate(parent, 2500000);
|
||||
break;
|
||||
case SPEED_100:
|
||||
clk_set_rate(parent, 25000000);
|
||||
break;
|
||||
default:
|
||||
clk_set_rate(parent, 125000000);
|
||||
break;
|
||||
}
|
||||
}
|
||||
#ifdef CONFIG_ACPI
|
||||
else {
|
||||
switch (pdata->phy_speed) {
|
||||
case SPEED_10:
|
||||
acpi_evaluate_object(ACPI_HANDLE(dev),
|
||||
"S10", NULL, NULL);
|
||||
break;
|
||||
case SPEED_100:
|
||||
acpi_evaluate_object(ACPI_HANDLE(dev),
|
||||
"S100", NULL, NULL);
|
||||
break;
|
||||
default:
|
||||
acpi_evaluate_object(ACPI_HANDLE(dev),
|
||||
"S1G", NULL, NULL);
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
|
||||
{
|
||||
struct device *dev = &pdata->pdev->dev;
|
||||
@@ -477,12 +516,14 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
|
||||
switch (pdata->phy_speed) {
|
||||
case SPEED_10:
|
||||
ENET_INTERFACE_MODE2_SET(&mc2, 1);
|
||||
intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
|
||||
CFG_MACMODE_SET(&icm0, 0);
|
||||
CFG_WAITASYNCRD_SET(&icm2, 500);
|
||||
rgmii &= ~CFG_SPEED_1250;
|
||||
break;
|
||||
case SPEED_100:
|
||||
ENET_INTERFACE_MODE2_SET(&mc2, 1);
|
||||
intf_ctl &= ~ENET_GHD_MODE;
|
||||
intf_ctl |= ENET_LHD_MODE;
|
||||
CFG_MACMODE_SET(&icm0, 1);
|
||||
CFG_WAITASYNCRD_SET(&icm2, 80);
|
||||
@@ -490,12 +531,15 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
|
||||
break;
|
||||
default:
|
||||
ENET_INTERFACE_MODE2_SET(&mc2, 2);
|
||||
intf_ctl &= ~ENET_LHD_MODE;
|
||||
intf_ctl |= ENET_GHD_MODE;
|
||||
|
||||
CFG_MACMODE_SET(&icm0, 2);
|
||||
CFG_WAITASYNCRD_SET(&icm2, 0);
|
||||
if (dev->of_node) {
|
||||
CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
|
||||
CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
|
||||
}
|
||||
rgmii |= CFG_SPEED_1250;
|
||||
|
||||
xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
|
||||
value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
|
||||
@@ -503,7 +547,7 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
|
||||
break;
|
||||
}
|
||||
|
||||
mc2 |= FULL_DUPLEX2;
|
||||
mc2 |= FULL_DUPLEX2 | PAD_CRC;
|
||||
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
|
||||
xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
|
||||
|
||||
@@ -522,6 +566,7 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
|
||||
/* Rtype should be copied from FP */
|
||||
xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
|
||||
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
|
||||
xgene_enet_configure_clock(pdata);
|
||||
|
||||
/* Rx-Tx traffic resume */
|
||||
xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
|
||||
|
||||
@@ -181,6 +181,7 @@ enum xgene_enet_rm {
|
||||
#define ENET_LHD_MODE BIT(25)
|
||||
#define ENET_GHD_MODE BIT(26)
|
||||
#define FULL_DUPLEX2 BIT(0)
|
||||
#define PAD_CRC BIT(2)
|
||||
#define SCAN_AUTO_INCR BIT(5)
|
||||
#define TBYT_ADDR 0x38
|
||||
#define TPKT_ADDR 0x39
|
||||
|
||||
@@ -698,7 +698,6 @@ static int xgene_enet_open(struct net_device *ndev)
|
||||
else
|
||||
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
|
||||
|
||||
netif_carrier_off(ndev);
|
||||
netif_start_queue(ndev);
|
||||
|
||||
return ret;
|
||||
|
||||
@@ -173,6 +173,7 @@ config SYSTEMPORT
|
||||
config BNXT
|
||||
tristate "Broadcom NetXtreme-C/E support"
|
||||
depends on PCI
|
||||
depends on VXLAN || VXLAN=n
|
||||
select FW_LOADER
|
||||
select LIBCRC32C
|
||||
---help---
|
||||
|
||||
@@ -1292,8 +1292,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
|
||||
return TX_CMP_VALID(txcmp, raw_cons);
|
||||
}
|
||||
|
||||
#define CAG_LEGACY_INT_STATUS 0x2014
|
||||
|
||||
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
|
||||
{
|
||||
struct bnxt_napi *bnapi = dev_instance;
|
||||
@@ -1305,7 +1303,7 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
|
||||
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
|
||||
|
||||
if (!bnxt_has_work(bp, cpr)) {
|
||||
int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
|
||||
int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
|
||||
/* return if erroneous interrupt */
|
||||
if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
|
||||
return IRQ_NONE;
|
||||
@@ -4527,10 +4525,25 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Common routine to pre-map certain register block to different GRC window.
|
||||
* A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
|
||||
* in PF and 3 windows in VF that can be customized to map in different
|
||||
* register blocks.
|
||||
*/
|
||||
static void bnxt_preset_reg_win(struct bnxt *bp)
|
||||
{
|
||||
if (BNXT_PF(bp)) {
|
||||
/* CAG registers map to GRC window #4 */
|
||||
writel(BNXT_CAG_REG_BASE,
|
||||
bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
|
||||
}
|
||||
}
|
||||
|
||||
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
bnxt_preset_reg_win(bp);
|
||||
netif_carrier_off(bp->dev);
|
||||
if (irq_re_init) {
|
||||
rc = bnxt_setup_int_mode(bp);
|
||||
@@ -5294,7 +5307,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
|
||||
struct bnxt_ntuple_filter *fltr, *new_fltr;
|
||||
struct flow_keys *fkeys;
|
||||
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
|
||||
int rc = 0, idx;
|
||||
int rc = 0, idx, bit_id;
|
||||
struct hlist_head *head;
|
||||
|
||||
if (skb->encapsulation)
|
||||
@@ -5332,14 +5345,15 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
|
||||
rcu_read_unlock();
|
||||
|
||||
spin_lock_bh(&bp->ntp_fltr_lock);
|
||||
new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
|
||||
BNXT_NTP_FLTR_MAX_FLTR, 0);
|
||||
if (new_fltr->sw_id < 0) {
|
||||
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
|
||||
BNXT_NTP_FLTR_MAX_FLTR, 0);
|
||||
if (bit_id < 0) {
|
||||
spin_unlock_bh(&bp->ntp_fltr_lock);
|
||||
rc = -ENOMEM;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
new_fltr->sw_id = (u16)bit_id;
|
||||
new_fltr->flow_id = flow_id;
|
||||
new_fltr->rxq = rxq_index;
|
||||
hlist_add_head_rcu(&new_fltr->hash, head);
|
||||
|
||||
@@ -166,9 +166,11 @@ struct rx_cmp {
|
||||
#define RX_CMP_HASH_VALID(rxcmp) \
|
||||
((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
|
||||
|
||||
#define RSS_PROFILE_ID_MASK 0x1f
|
||||
|
||||
#define RX_CMP_HASH_TYPE(rxcmp) \
|
||||
((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
|
||||
RX_CMP_RSS_HASH_TYPE_SHIFT)
|
||||
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
|
||||
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
|
||||
|
||||
struct rx_cmp_ext {
|
||||
__le32 rx_cmp_flags2;
|
||||
@@ -282,9 +284,9 @@ struct rx_tpa_start_cmp {
|
||||
cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
|
||||
|
||||
#define TPA_START_HASH_TYPE(rx_tpa_start) \
|
||||
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
|
||||
RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
|
||||
RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
|
||||
(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
|
||||
RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
|
||||
RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
|
||||
|
||||
#define TPA_START_AGG_ID(rx_tpa_start) \
|
||||
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
|
||||
@@ -839,6 +841,10 @@ struct bnxt_queue_info {
|
||||
u8 queue_profile;
|
||||
};
|
||||
|
||||
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
|
||||
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
|
||||
#define BNXT_CAG_REG_BASE 0x300000
|
||||
|
||||
struct bnxt {
|
||||
void __iomem *bar0;
|
||||
void __iomem *bar1;
|
||||
@@ -959,11 +965,11 @@ struct bnxt {
|
||||
#define BNXT_RX_MASK_SP_EVENT 0
|
||||
#define BNXT_RX_NTP_FLTR_SP_EVENT 1
|
||||
#define BNXT_LINK_CHNG_SP_EVENT 2
|
||||
#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 4
|
||||
#define BNXT_VXLAN_ADD_PORT_SP_EVENT 8
|
||||
#define BNXT_VXLAN_DEL_PORT_SP_EVENT 16
|
||||
#define BNXT_RESET_TASK_SP_EVENT 32
|
||||
#define BNXT_RST_RING_SP_EVENT 64
|
||||
#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3
|
||||
#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4
|
||||
#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
|
||||
#define BNXT_RESET_TASK_SP_EVENT 6
|
||||
#define BNXT_RST_RING_SP_EVENT 7
|
||||
|
||||
struct bnxt_pf_info pf;
|
||||
#ifdef CONFIG_BNXT_SRIOV
|
||||
|
||||
@@ -258,7 +258,7 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
|
||||
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
|
||||
{
|
||||
int i, rc = 0;
|
||||
struct bnxt_pf_info *pf = &bp->pf;
|
||||
@@ -267,7 +267,7 @@ static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
|
||||
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
|
||||
|
||||
mutex_lock(&bp->hwrm_cmd_lock);
|
||||
for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
|
||||
for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
|
||||
req.vf_id = cpu_to_le16(i);
|
||||
rc = _hwrm_send_message(bp, &req, sizeof(req),
|
||||
HWRM_CMD_TIMEOUT);
|
||||
@@ -509,7 +509,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
|
||||
|
||||
err_out2:
|
||||
/* Free the resources reserved for various VF's */
|
||||
bnxt_hwrm_func_vf_resource_free(bp);
|
||||
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
|
||||
|
||||
err_out1:
|
||||
bnxt_free_vf_resources(bp);
|
||||
@@ -519,13 +519,19 @@ err_out1:
|
||||
|
||||
void bnxt_sriov_disable(struct bnxt *bp)
|
||||
{
|
||||
if (!bp->pf.active_vfs)
|
||||
u16 num_vfs = pci_num_vf(bp->pdev);
|
||||
|
||||
if (!num_vfs)
|
||||
return;
|
||||
|
||||
pci_disable_sriov(bp->pdev);
|
||||
|
||||
/* Free the resources reserved for various VF's */
|
||||
bnxt_hwrm_func_vf_resource_free(bp);
|
||||
if (pci_vfs_assigned(bp->pdev)) {
|
||||
netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
|
||||
num_vfs);
|
||||
} else {
|
||||
pci_disable_sriov(bp->pdev);
|
||||
/* Free the HW resources reserved for various VF's */
|
||||
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
|
||||
}
|
||||
|
||||
bnxt_free_vf_resources(bp);
|
||||
|
||||
@@ -552,17 +558,25 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
|
||||
}
|
||||
bp->sriov_cfg = true;
|
||||
rtnl_unlock();
|
||||
if (!num_vfs) {
|
||||
bnxt_sriov_disable(bp);
|
||||
return 0;
|
||||
|
||||
if (pci_vfs_assigned(bp->pdev)) {
|
||||
netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
|
||||
num_vfs = 0;
|
||||
goto sriov_cfg_exit;
|
||||
}
|
||||
|
||||
/* Check if enabled VFs is same as requested */
|
||||
if (num_vfs == bp->pf.active_vfs)
|
||||
return 0;
|
||||
if (num_vfs && num_vfs == bp->pf.active_vfs)
|
||||
goto sriov_cfg_exit;
|
||||
|
||||
/* if there are previous existing VFs, clean them up */
|
||||
bnxt_sriov_disable(bp);
|
||||
if (!num_vfs)
|
||||
goto sriov_cfg_exit;
|
||||
|
||||
bnxt_sriov_enable(bp, &num_vfs);
|
||||
|
||||
sriov_cfg_exit:
|
||||
bp->sriov_cfg = false;
|
||||
wake_up(&bp->sriov_cfg_wait);
|
||||
|
||||
|
||||
@@ -5,7 +5,8 @@
|
||||
config NET_VENDOR_HISILICON
|
||||
bool "Hisilicon devices"
|
||||
default y
|
||||
depends on OF && (ARM || ARM64 || COMPILE_TEST)
|
||||
depends on OF && HAS_DMA
|
||||
depends on ARM || ARM64 || COMPILE_TEST
|
||||
---help---
|
||||
If you have a network (Ethernet) card belonging to this class, say Y.
|
||||
|
||||
|
||||
@@ -44,6 +44,7 @@ config MVNETA
|
||||
tristate "Marvell Armada 370/38x/XP network interface support"
|
||||
depends on PLAT_ORION
|
||||
select MVMDIO
|
||||
select FIXED_PHY
|
||||
---help---
|
||||
This driver supports the network interface units in the
|
||||
Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
|
||||
|
||||
@@ -1493,9 +1493,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
|
||||
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
|
||||
void *data = (void *)rx_desc->buf_cookie;
|
||||
|
||||
mvneta_frag_free(pp, data);
|
||||
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
|
||||
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
|
||||
mvneta_frag_free(pp, data);
|
||||
}
|
||||
|
||||
if (rx_done)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user