Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Verify netlink attributes properly in nf_queue, from Eric Dumazet.
2) Need to bump memory lock rlimit for test_sockmap bpf test, from
Yonghong Song.
3) Fix VLAN handling in lan78xx driver, from Dave Stevenson.
4) Fix uninitialized read in nf_log, from Jann Horn.
5) Fix raw command length parsing in mlx5, from Alex Vesker.
6) Cleanup loopback RDS connections upon netns deletion, from Sowmini
Varadhan.
7) Fix regressions in FIB rule matching during create, from Jason A.
Donenfeld and Roopa Prabhu.
8) Fix mpls ether type detection in nfp, from Pieter Jansen van Vuuren.
9) More bpfilter build fixes/adjustments from Masahiro Yamada.
10) Fix XDP_{TX,REDIRECT} flushing in various drivers, from Jesper
Dangaard Brouer.
11) fib_tests.sh file permissions were broken, from Shuah Khan.
12) Make sure BH/preemption is disabled in data path of mac80211, from
Denis Kenzior.
13) Don't ignore nla_parse_nested() return values in nl80211, from
Johannes Berg.
14) Properly account sock objects to kmemcg, from Shakeel Butt.
15) Adjustments to setting bpf program permissions to read-only, from
Daniel Borkmann.
16) TCP Fast Open key endianness was broken; it always took on the host
endianness. Whoops. Explicitly make it little endian (a byte-order
sketch follows this list). From Yuchung Cheng.
17) Fix prefix route setting for link local addresses in ipv6, from
David Ahern.
18) Potential Spectre v1 in zatm driver, from Gustavo A. R. Silva.
19) Various bpf sockmap fixes, from John Fastabend.
20) Use after free for GRO with ESP, from Sabrina Dubroca.
21) Passing bogus flags to crypto_alloc_shash() in ipv6 SR code, from
Eric Biggers.
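For item 16, a key stored in host byte order matches only on machines of the same endianness; serializing through explicit little-endian conversion makes the bytes architecture-independent. A minimal standalone sketch of the idea (the four-word key layout mirrors a 16-byte fastopen key; put_le32/get_le32 are this example's own helpers, standing in for the kernel's cpu_to_le32()/le32_to_cpu()):

	#include <stdint.h>
	#include <stdio.h>

	/* Store a 32-bit word as little-endian bytes regardless of host order. */
	static void put_le32(uint8_t *out, uint32_t v)
	{
		out[0] = v & 0xff;
		out[1] = (v >> 8) & 0xff;
		out[2] = (v >> 16) & 0xff;
		out[3] = (v >> 24) & 0xff;
	}

	static uint32_t get_le32(const uint8_t *in)
	{
		return (uint32_t)in[0] | ((uint32_t)in[1] << 8) |
		       ((uint32_t)in[2] << 16) | ((uint32_t)in[3] << 24);
	}

	int main(void)
	{
		uint32_t key[4] = { 0x01020304, 0x05060708, 0x090a0b0c, 0x0d0e0f10 };
		uint8_t wire[16];
		int i;

		for (i = 0; i < 4; i++)
			put_le32(wire + 4 * i, key[i]); /* same bytes on any host */

		printf("round-trip ok: %d\n", get_le32(wire) == key[0]);
		return 0;
	}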
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (87 commits)
qede: Adverstise software timestamp caps when PHC is not available.
qed: Fix use of incorrect size in memcpy call.
qed: Fix setting of incorrect eswitch mode.
qed: Limit msix vectors in kdump kernel to the minimum required count.
ipvlan: call dev_change_flags when ipvlan mode is reset
ipv6: sr: fix passing wrong flags to crypto_alloc_shash()
net: fix use-after-free in GRO with ESP
tcp: prevent bogus FRTO undos with non-SACK flows
bpf: sockhash, add release routine
bpf: sockhash fix omitted bucket lock in sock_close
bpf: sockmap, fix smap_list_map_remove when psock is in many maps
bpf: sockmap, fix crash when ipv6 sock is added
net: fib_rules: bring back rule_exists to match rule during add
hv_netvsc: split sub-channel setup into async and sync
net: use dev_change_tx_queue_len() for SIOCSIFTXQLEN
atm: zatm: Fix potential Spectre v1
s390/qeth: consistently re-enable device features
s390/qeth: don't clobber buffer on async TX completion
s390/qeth: avoid using is_multicast_ether_addr_64bits on (u8 *)[6]
s390/qeth: fix race when setting MAC address
...
Makefile | 5 -----
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-CC_CAN_LINK := y
-export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		/* there are 2 passes here */
 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-	set_memory_ro((unsigned long)header, header->pages);
+	bpf_jit_binary_lock_ro(header);
 	prog->bpf_func = (void *)ctx.target;
 	prog->jited = 1;
 	prog->jited_len = image_size;
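This hunk belongs to item 15: the arch JIT now goes through the generic bpf_jit_binary_lock_ro() helper, which write-protects the whole JIT image and records that locked state for the common bpf code, instead of an open-coded set_memory_ro() on the header pages. A rough userspace analog of locking a finished code image (mmap/mprotect stand in for the kernel page-permission helpers; on systems enforcing strict W^X the PROT_EXEC step may be refused):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096;
		unsigned char *image = mmap(NULL, len, PROT_READ | PROT_WRITE,
					    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (image == MAP_FAILED)
			return 1;

		memset(image, 0xc3, 16);	/* "emit" placeholder code bytes */

		/* Flip the entire allocation read-only (+exec) in one step,
		 * rather than protecting a sub-range by hand. */
		if (mprotect(image, len, PROT_READ | PROT_EXEC) != 0)
			return 1;

		printf("image locked read-only at %p\n", (void *)image);
		return 0;
	}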
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 			goto free_addrs;
 		}
 		if (bpf_jit_prog(&jit, fp)) {
+			bpf_jit_binary_free(header);
 			fp = orig_fp;
 			goto free_addrs;
 		}

@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
 	skb_queue_head_init(&iadev->rx_dma_q);
 	iadev->rx_free_desc_qhead = NULL;
 
-	iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
 	if (!iadev->rx_open) {
 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
 		       dev->number);

@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 				return -EFAULT;
 			if (pool < 0 || pool > ZATM_LAST_POOL)
 				return -EINVAL;
+			pool = array_index_nospec(pool,
+						  ZATM_LAST_POOL + 1);
 			if (copy_from_user(&info,
 			    &((struct zatm_pool_req __user *) arg)->info,
 			    sizeof(info))) return -EFAULT;
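The zatm hunk is the standard Spectre v1 mitigation pattern (item 18): even after the bounds check, a mispredicted branch can speculatively index the array with the unclamped value, so the index is clamped branchlessly. A standalone re-implementation of the mask idea for illustration only (array_index_nospec() is the real kernel API; this version assumes arithmetic right shift, as the kernel's generic helper does):

	#include <stdio.h>

	/* All-ones when index < size, all-zeroes otherwise, computed
	 * without a branch (mirrors array_index_mask_nospec()). */
	static unsigned long mask_nospec(unsigned long index, unsigned long size)
	{
		return ~(long)(index | (size - 1UL - index)) >> (8 * sizeof(long) - 1);
	}

	int main(void)
	{
		unsigned long size = 8;
		unsigned long in_range = 5, out_of_range = 42;

		printf("%lu -> %lu\n", in_range,
		       in_range & mask_nospec(in_range, size));	/* stays 5 */
		printf("%lu -> %lu\n", out_of_range,
		       out_of_range & mask_nospec(out_of_range, size)); /* forced to 0 */
		return 0;
	}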
@@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
 			     MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-	if (MLX5_VPORT_MANAGER(mdev) &&
+	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
 	bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-	struct bpf_prog *prog;
 	struct rc_dev *rcdev;
 	int ret;
 
 	if (attr->attach_flags)
 		return -EINVAL;
 
-	prog = bpf_prog_get_type(attr->attach_bpf_fd,
-				 BPF_PROG_TYPE_LIRC_MODE2);
-	if (IS_ERR(prog))
-		return PTR_ERR(prog);
-
 	rcdev = rc_dev_get_from_fd(attr->target_fd);
-	if (IS_ERR(rcdev)) {
-		bpf_prog_put(prog);
+	if (IS_ERR(rcdev))
 		return PTR_ERR(rcdev);
-	}
 
 	ret = lirc_bpf_attach(rcdev, prog);
-	if (ret)
-		bpf_prog_put(prog);
 
 	put_device(&rcdev->dev);
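The signature change moves bpf_prog lookup into the generic bpf attach path: the lirc helper now receives an already-resolved, counted reference instead of resolving attr->attach_bpf_fd itself, and it no longer drops that reference on failure; the caller that took the reference puts it. A small standalone sketch of that ownership convention (the struct and helpers here are invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy counted object standing in for a bpf_prog. */
	struct prog {
		int refcnt;
	};

	static void prog_get(struct prog *p) { p->refcnt++; }

	static void prog_put(struct prog *p)
	{
		if (--p->refcnt == 0) {
			printf("prog freed\n");
			free(p);
		}
	}

	/* Callee: borrows the caller's reference, takes its own only on
	 * success, and never puts the caller's reference on failure. */
	static int attach(struct prog *p, int fail)
	{
		if (fail)
			return -1;
		prog_get(p);	/* the attachment holds its own reference */
		return 0;
	}

	int main(void)
	{
		struct prog *p = calloc(1, sizeof(*p));

		p->refcnt = 1;	/* caller's reference, as after fd lookup */
		if (attach(p, 1) != 0)
			printf("attach failed, caller still owns the prog\n");
		prog_put(p);	/* caller drops its reference exactly once */
		return 0;
	}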
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct alx_priv *alx = pci_get_drvdata(pdev);
 	struct alx_hw *hw = &alx->hw;
+	int err;
 
 	alx_reset_phy(hw);
 
 	if (!netif_running(alx->dev))
 		return 0;
 	netif_device_attach(alx->dev);
-	return __alx_open(alx, true);
+
+	rtnl_lock();
+	err = __alx_open(alx, true);
+	rtnl_unlock();
+
+	return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
@@ -1533,6 +1533,7 @@ struct bnx2x {
 	struct link_vars	link_vars;
 	u32			link_cnt;
 	struct bnx2x_link_report_data last_reported_link;
+	bool			force_link_down;
 
 	struct mdio_if_info	mdio;

@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
 	struct bnx2x_link_report_data cur_data;
 
+	if (bp->force_link_down) {
+		bp->link_vars.link_up = 0;
+		return;
+	}
+
 	/* reread mf_cfg */
 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
 		bnx2x_read_mf_cfg(bp);

@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		bp->pending_max = 0;
 	}
 
+	bp->force_link_down = false;
 	if (bp->port.pmf) {
 		rc = bnx2x_initial_phy_init(bp, load_mode);
 		if (rc)

@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
 		bp->sp_rtnl_state = 0;
 		smp_mb();
 
+		/* Immediately indicate link as down */
+		bp->link_vars.link_up = 0;
+		bp->force_link_down = true;
+		netif_carrier_off(bp->dev);
+		BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
 		/* When ret value shows failure of allocation failure,
 		 * the nic is rebooted again. If open still fails, a error
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 	id_tbl->max = size;
 	id_tbl->next = next;
 	spin_lock_init(&id_tbl->lock);
-	id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
 	if (!id_tbl->table)
 		return -ENOMEM;
 
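The cnic id table is walked with long-based bitmap helpers, so sizing it as DIV_ROUND_UP(size, 32) 4-byte chunks can under-allocate on 64-bit, where the bitmap code reads whole longs; BITS_TO_LONGS(size) * sizeof(long) is the matching allocation. A standalone comparison of the two sizings (macros copied from the kernel idiom):

	#include <stdio.h>

	#define BITS_PER_LONG		(8 * sizeof(long))
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define BITS_TO_LONGS(nbits)	DIV_ROUND_UP(nbits, BITS_PER_LONG)

	int main(void)
	{
		unsigned long size = 32;	/* bits in the id table */

		/* old sizing: 32-bit chunks -> 4 bytes for 32 bits */
		size_t old_bytes = DIV_ROUND_UP(size, 32) * 4;
		/* new sizing: whole longs -> 8 bytes on LP64, matching the
		 * long-granular reads done by the bitmap helpers */
		size_t new_bytes = BITS_TO_LONGS(size) * sizeof(long);

		printf("%zu bytes vs %zu bytes for %lu bits\n",
		       old_bytes, new_bytes, size);
		return 0;
	}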
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev)
 	int err;
 	u32 reg;
 
+	bp->queues[0].bp = bp;
+
 	dev->netdev_ops = &at91ether_netdev_ops;
 	dev->ethtool_ops = &macb_ethtool_ops;

@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */

@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-				 sizeof(struct qm_sg_entry) * (1 + nr_frags),
+		dma_unmap_single(dev, addr,
+				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
 
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),

@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	void *sgt_buf;
 
 	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom +
-			    sizeof(struct qm_sg_entry) * (1 + nr_frags));
+	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
 	sgt_buf = netdev_alloc_frag(sz);
 	if (unlikely(!sgt_buf)) {
 		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",

@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	skbh = (struct sk_buff **)buffer_start;
 	*skbh = skb;
 
-	addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-			      sizeof(struct qm_sg_entry) * (1 + nr_frags),
-			      dma_dir);
+	addr = dma_map_single(dev, buffer_start,
+			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
 	if (unlikely(dma_mapping_error(dev, addr))) {
 		dev_err(dev, "DMA mapping failed");
 		err = -EINVAL;

@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
 	struct {
 		u32 ssa; /* Soft Sequence Attachment */

@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
 		iowrite32be(0xffffffff, &regs->pmda[i].lcv);
 	}
 
+	/* Short packet padding removal from checksum calculation */
+	iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+	iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
 	start_port_hwp(port);
 }
 

@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
 	struct hinic_rq *rq = rxq->rq;
 
+	irq_set_affinity_hint(rq->irq, NULL);
 	free_irq(rq->irq, rxq);
 	rx_del_napi(rxq);
 }
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 	return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS		0
+#define I40E_XDP_CONSUMED	BIT(0)
+#define I40E_XDP_TX		BIT(1)
+#define I40E_XDP_REDIR		BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
 			      struct i40e_ring *xdp_ring);
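Turning the XDP verdicts into BIT() flags is what makes the XDP_{TX,REDIRECT} flushing fix of item 10 work: per-packet verdicts are OR-ed into an unsigned accumulator across one NAPI budget, and the driver then flushes once per action kind (redirect map flush, Tx ring tail bump) instead of keying everything off a single bool. A standalone sketch of the accumulate-then-flush pattern (the flag values mirror the defines above; everything else is invented for illustration):

	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define XDP_CONSUMED	BIT(0)
	#define XDP_TX		BIT(1)
	#define XDP_REDIR	BIT(2)

	int main(void)
	{
		/* pretend per-packet verdicts from one NAPI budget */
		unsigned int verdicts[] = { XDP_TX, XDP_CONSUMED, XDP_REDIR, XDP_TX };
		unsigned int xdp_xmit = 0;
		unsigned int i;

		for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
			xdp_xmit |= verdicts[i];	/* remember every action seen */

		/* flush once per action type, after the loop */
		if (xdp_xmit & XDP_REDIR)
			printf("xdp_do_flush_map()\n");
		if (xdp_xmit & XDP_TX)
			printf("bump XDP Tx ring tail\n");
		return 0;
	}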
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);

@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	bool failure = false, xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
+	bool failure = false;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;

@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -I40E_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;

@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & I40E_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & I40E_XDP_TX) {
 		struct i40e_ring *xdp_ring =
 			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
 		i40e_xdp_ring_update_tail(xdp_ring);
-		xdp_do_flush_map();
 	}
 
 	rx_ring->skb = skb;
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS		0
+#define IXGBE_XDP_CONSUMED	BIT(0)
+#define IXGBE_XDP_TX		BIT(1)
+#define IXGBE_XDP_REDIR		BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf);

@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;

@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;

@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;

@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w

@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
 	unsigned long flags;
 	bool poll_cmd = ent->polling;
 	int alloc_ret;
+	int cmd_mode;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);

@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
+	cmd_mode = cmd->mode;
 
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();

@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
 	struct mlx5_core_dev *dev = filp->private_data;
 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
+	char outlen_str[8] = {0};
 	int outlen;
 	void *ptr;
 	int err;

@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(outlen_str, buf, count))
 		return -EFAULT;
 
-	outlen_str[7] = 0;
-
 	err = sscanf(outlen_str, "%d", &outlen);
 	if (err < 0)
 		return err;
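The outlen_write change fixes an uninitialized-read bug of the same class as item 4: copy_from_user() fills only count bytes, and forcing outlen_str[7] = 0 still leaves the bytes between count and 7 uninitialized, so sscanf() could parse stack garbage. Zero-initializing the whole buffer guarantees NUL termination for any short copy. A standalone userspace illustration (memcpy stands in for copy_from_user):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char user_buf[] = { '4', '2' };	/* 2 bytes, no NUL */
		size_t count = sizeof(user_buf);
		char outlen_str[8] = {0};	/* zero-fill: always NUL-terminated */
		int outlen;

		memcpy(outlen_str, user_buf,
		       count < sizeof(outlen_str) - 1 ? count : sizeof(outlen_str) - 1);

		if (sscanf(outlen_str, "%d", &outlen) == 1)
			printf("parsed %d\n", outlen);	/* prints 42, never garbage */
		return 0;
	}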
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_activate_channels(&priv->channels);
 	netif_tx_start_all_queues(priv->netdev);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_add_sqs_fwd_rules(priv);
 
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);

@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
 	mlx5e_redirect_rqts_to_drop(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_remove_sqs_fwd_rules(priv);
 
 	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when

@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-	if (MLX5_VPORT_MANAGER(mdev))
+	if (MLX5_ESWITCH_MANAGER(mdev))
 		netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 

@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5e_enable_async_events(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_register_vport_reps(priv);
 
 	if (netdev->reg_state != NETREG_REGISTERED)

@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_unregister_vport_reps(priv);
 
 	mlx5e_disable_async_events(priv);

@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 		return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-	if (MLX5_VPORT_MANAGER(mdev)) {
+	if (MLX5_ESWITCH_MANAGER(mdev)) {
 		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
 		if (!rpriv) {
 			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
Some files were not shown because too many files have changed in this diff.