Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Various sockmap fixes from John Fastabend (pinned map handling,
blocking in recvmsg, double page put, error handling during redirect
failures, etc.)
2) Fix dead code handling in x86-64 JIT, from Gianluca Borello.
3) Missing device put in RDS IB code, from Dag Moxnes.
4) Don't process fast open during repair mode in TCP, from Yuchung
Cheng.
5) More address/port comparison fixes in SCTP, from Xin Long.
6) Handle adding a bond slave's master into a bridge properly, from
Hangbin Liu.
7) IPv6 multipath code can operate on uninitialized memory due to an
assumption that the icmp header is in the linear SKB area. Fix from
Eric Dumazet (see the sketch after this list).
8) Don't invoke do_tcp_sendpages() recursively via TLS, from Dave
Watson.
9) Fix memory leaks in x86-64 JIT, from Daniel Borkmann.
10) RDS leaks kernel memory to userspace, from Eric Dumazet.
11) DCCP can invoke a tasklet on a freed socket, take a refcount. Also
from Eric Dumazet.
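
A note on fix 7: the usual way to read a header that may not sit in the
linear SKB area is skb_header_pointer(), which copies the requested bytes
into a stack buffer when necessary and returns NULL if the packet is too
short. A minimal sketch of that idiom (illustrative only, not the exact
patch; the function name and offset handling are assumptions):

    #include <linux/skbuff.h>
    #include <linux/icmpv6.h>

    /* Hedged sketch: never assume the ICMPv6 header is in skb->data. */
    static void inspect_icmp6(const struct sk_buff *skb, int offset)
    {
        struct icmp6hdr _icmph;
        const struct icmp6hdr *icmph;

        /* Copies into _icmph if the header lives in frags; NULL if short. */
        icmph = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
        if (!icmph)
            return;

        /* Only now is it safe to read icmph->icmp6_type / icmp6_code. */
    }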
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (78 commits)
dccp: fix tasklet usage
smc: fix sendpage() call
net/smc: handle unregistered buffers
net/smc: call consolidation
qed: fix spelling mistake: "offloded" -> "offloaded"
net/mlx5e: fix spelling mistake: "loobpack" -> "loopback"
tcp: restore autocorking
rds: do not leak kernel memory to user land
qmi_wwan: do not steal interfaces from class drivers
ipv4: fix fnhe usage by non-cached routes
bpf: sockmap, fix error handling in redirect failures
bpf: sockmap, zero sg_size on error when buffer is released
bpf: sockmap, fix scatterlist update on error path in send with apply
net_sched: fq: take care of throttled flows before reuse
ipv6: Revert "ipv6: Allow non-gateway ECMP for IPv6"
bpf, x64: fix memleak when not converging on calls
bpf, x64: fix memleak when not converging after image
net/smc: restrict non-blocking connect finish
8139too: Use disable_irq_nosync() in rtl8139_poll_controller()
sctp: fix the issue that the cookie-ack with auth can't get processed
...
@@ -557,6 +557,14 @@ A: Although LLVM IR generation and optimization try to stay architecture
     pulls in some header files containing file scope host assembly codes.
   - You can add "-fno-jump-tables" to work around the switch table issue.
 
-  Otherwise, you can use bpf target.
+  Otherwise, you can use bpf target. Additionally, you _must_ use bpf target
+  when:
+
+  - Your program uses data structures with pointer or long / unsigned long
+    types that interface with BPF helpers or context data structures. Access
+    into these structures is verified by the BPF verifier and may result
+    in verification failures if the native architecture is not aligned with
+    the BPF architecture, e.g. 64-bit. An example of this is
+    BPF_PROG_TYPE_SK_MSG require '-target bpf'
 
 Happy BPF hacking!
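To make the new documentation text concrete: an SK_MSG program reads pointer
fields out of its context, so the compile target's pointer width has to match
BPF's 64-bit model; built for a 32-bit native target, the generated field
offsets would disagree with what the verifier checks. A minimal sketch, with
the build line and the SEC() helper as assumptions (the program is
illustrative, not from the patch):

    /* Hedged sketch: why BPF_PROG_TYPE_SK_MSG wants '-target bpf'.
     * struct sk_msg_md exposes data/data_end as pointers; on a 32-bit
     * native target they would be 4 bytes wide.
     *
     * Assumed build: clang -O2 -target bpf -c sk_msg_prog.c -o sk_msg_prog.o
     */
    #include <linux/bpf.h>

    #define SEC(name) __attribute__((section(name), used))

    SEC("sk_msg")
    int msg_prog(struct sk_msg_md *msg)
    {
        void *data = (void *)(long)msg->data;
        void *data_end = (void *)(long)msg->data_end;

        /* Verifier-mandated bounds check before touching the payload. */
        if (data + 1 > data_end)
            return SK_DROP;

        return SK_PASS;
    }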
@@ -9725,6 +9725,7 @@ W: https://fedorahosted.org/dropwatch/
 F: net/core/drop_monitor.c
 
 NETWORKING DRIVERS
 M: "David S. Miller" <davem@davemloft.net>
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net
 Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -12498,6 +12499,7 @@ F: drivers/scsi/st_*.h
 SCTP PROTOCOL
 M: Vlad Yasevich <vyasevich@gmail.com>
 M: Neil Horman <nhorman@tuxdriver.com>
+M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 L: linux-sctp@vger.kernel.org
 W: http://lksctp.sourceforge.net
 S: Maintained
@@ -1027,7 +1027,17 @@ emit_cond_jmp: /* convert BPF opcode to x86 */
             break;
 
         case BPF_JMP | BPF_JA:
-            jmp_offset = addrs[i + insn->off] - addrs[i];
+            if (insn->off == -1)
+                /* -1 jmp instructions will always jump
+                 * backwards two bytes. Explicitly handling
+                 * this case avoids wasting too many passes
+                 * when there are long sequences of replaced
+                 * dead code.
+                 */
+                jmp_offset = -2;
+            else
+                jmp_offset = addrs[i + insn->off] - addrs[i];
+
             if (!jmp_offset)
                 /* optimize out nop jumps */
                 break;
@@ -1226,6 +1236,7 @@ skip_init_addrs:
     for (pass = 0; pass < 20 || image; pass++) {
         proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
         if (proglen <= 0) {
+out_image:
             image = NULL;
             if (header)
                 bpf_jit_binary_free(header);
@@ -1236,8 +1247,7 @@ skip_init_addrs:
             if (proglen != oldproglen) {
                 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
                        proglen, oldproglen);
-                prog = orig_prog;
-                goto out_addrs;
+                goto out_image;
             }
             break;
         }
@@ -1273,7 +1283,7 @@ skip_init_addrs:
         prog = orig_prog;
     }
 
-    if (!prog->is_func || extra_pass) {
+    if (!image || !prog->is_func || extra_pass) {
 out_addrs:
         kfree(addrs);
         kfree(jit_data);
@@ -4757,7 +4757,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
     struct mlx5_ib_dev *dev = to_mdev(ibdev);
 
-    return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+    return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
 }
 
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
     .ndo_select_queue = bcm_sysport_select_queue,
 };
 
-static int bcm_sysport_map_queues(struct net_device *dev,
+static int bcm_sysport_map_queues(struct notifier_block *nb,
                   struct dsa_notifier_register_info *info)
 {
-    struct bcm_sysport_priv *priv = netdev_priv(dev);
     struct bcm_sysport_tx_ring *ring;
+    struct bcm_sysport_priv *priv;
     struct net_device *slave_dev;
     unsigned int num_tx_queues;
     unsigned int q, start, port;
+    struct net_device *dev;
+
+    priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+    if (priv->netdev != info->master)
+        return 0;
+
+    dev = info->master;
 
     /* We can't be setting up queue inspection for non directly attached
      * switches
@@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev,
     if (priv->is_lite)
         netif_set_real_num_tx_queues(slave_dev,
                          slave_dev->num_tx_queues / 2);
+
     num_tx_queues = slave_dev->real_num_tx_queues;
 
     if (priv->per_port_num_tx_queues &&
         priv->per_port_num_tx_queues != num_tx_queues)
-        netdev_warn(slave_dev, "asymetric number of per-port queues\n");
+        netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
 
     priv->per_port_num_tx_queues = num_tx_queues;
 
@@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
     return 0;
 }
 
-static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
                     unsigned long event, void *ptr)
 {
     struct dsa_notifier_register_info *info;
@@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
 
     info = ptr;
 
-    return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+    return notifier_from_errno(bcm_sysport_map_queues(nb, info));
 }
 
 #define REV_FMT "v%2x.%02x"
@@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
 static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
     "tx-single-collision",
     "tx-multiple-collision",
-    "tx-late-collsion",
+    "tx-late-collision",
     "tx-aborted-frames",
     "tx-lost-frames",
     "tx-carrier-sense-errors",
@@ -942,6 +942,7 @@ struct mvpp2 {
     struct clk *pp_clk;
     struct clk *gop_clk;
     struct clk *mg_clk;
+    struct clk *mg_core_clk;
     struct clk *axi_clk;
 
     /* List of pointers to port structures */
@@ -8768,18 +8769,27 @@ static int mvpp2_probe(struct platform_device *pdev)
         err = clk_prepare_enable(priv->mg_clk);
         if (err < 0)
             goto err_gop_clk;
+
+        priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
+        if (IS_ERR(priv->mg_core_clk)) {
+            priv->mg_core_clk = NULL;
+        } else {
+            err = clk_prepare_enable(priv->mg_core_clk);
+            if (err < 0)
+                goto err_mg_clk;
+        }
     }
 
     priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
     if (IS_ERR(priv->axi_clk)) {
         err = PTR_ERR(priv->axi_clk);
         if (err == -EPROBE_DEFER)
-            goto err_gop_clk;
+            goto err_mg_core_clk;
         priv->axi_clk = NULL;
     } else {
         err = clk_prepare_enable(priv->axi_clk);
         if (err < 0)
-            goto err_gop_clk;
+            goto err_mg_core_clk;
     }
 
     /* Get system's tclk rate */
@@ -8793,7 +8803,7 @@ static int mvpp2_probe(struct platform_device *pdev)
     if (priv->hw_version == MVPP22) {
         err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
         if (err)
-            goto err_mg_clk;
+            goto err_axi_clk;
         /* Sadly, the BM pools all share the same register to
          * store the high 32 bits of their address. So they
          * must all have the same high 32 bits, which forces
@@ -8801,14 +8811,14 @@ static int mvpp2_probe(struct platform_device *pdev)
          */
         err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
         if (err)
-            goto err_mg_clk;
+            goto err_axi_clk;
     }
 
     /* Initialize network controller */
     err = mvpp2_init(pdev, priv);
     if (err < 0) {
         dev_err(&pdev->dev, "failed to initialize controller\n");
-        goto err_mg_clk;
+        goto err_axi_clk;
     }
 
     /* Initialize ports */
@@ -8821,7 +8831,7 @@ static int mvpp2_probe(struct platform_device *pdev)
     if (priv->port_count == 0) {
         dev_err(&pdev->dev, "no ports enabled\n");
         err = -ENODEV;
-        goto err_mg_clk;
+        goto err_axi_clk;
     }
 
     /* Statistics must be gathered regularly because some of them (like
@@ -8849,8 +8859,13 @@ err_port_probe:
         mvpp2_port_remove(priv->port_list[i]);
         i++;
     }
-err_mg_clk:
+err_axi_clk:
     clk_disable_unprepare(priv->axi_clk);
+
+err_mg_core_clk:
+    if (priv->hw_version == MVPP22)
+        clk_disable_unprepare(priv->mg_core_clk);
+err_mg_clk:
     if (priv->hw_version == MVPP22)
         clk_disable_unprepare(priv->mg_clk);
 err_gop_clk:
@@ -8897,6 +8912,7 @@ static int mvpp2_remove(struct platform_device *pdev)
         return 0;
 
     clk_disable_unprepare(priv->axi_clk);
+    clk_disable_unprepare(priv->mg_core_clk);
     clk_disable_unprepare(priv->mg_clk);
     clk_disable_unprepare(priv->pp_clk);
     clk_disable_unprepare(priv->gop_clk);
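The reworked mvpp2 error path above follows the kernel's usual goto-unwind
ladder: each newly acquired resource gets its own label, and a failure jumps
to the label that releases everything acquired so far, in reverse order. A
generic sketch of the idiom (all acquire_*/release_* names here are
hypothetical, not from the driver):

    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        int err;

        err = acquire_a(pdev);      /* hypothetical resource A */
        if (err)
            return err;             /* nothing to unwind yet */

        err = acquire_b(pdev);      /* hypothetical resource B */
        if (err)
            goto err_a;

        err = acquire_c(pdev);      /* hypothetical resource C */
        if (err)
            goto err_b;             /* unwind B, then A */

        return 0;

    err_b:
        release_b(pdev);
    err_a:
        release_a(pdev);
        return err;
    }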
@@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev)
 
     ret = mlx4_unbond_fs_rules(dev);
     if (ret)
-        mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret);
+        mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
     ret1 = mlx4_unbond_mac_table(dev);
     if (ret1) {
         mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
 
     mutex_lock(&priv->state_lock);
 
-    if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-        goto out;
-
     new_channels.params = priv->channels.params;
     mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
 
+    if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+        priv->channels.params = new_channels.params;
+        goto out;
+    }
+
     /* Skip if tx_min_inline is the same */
     if (new_channels.params.tx_min_inline_mode ==
         priv->channels.params.tx_min_inline_mode)
@@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 };
 
 static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
-                   struct mlx5e_params *params)
+                   struct mlx5e_params *params, u16 mtu)
 {
     u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                      MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                      MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
     params->hard_mtu = MLX5E_ETH_HARD_MTU;
+    params->sw_mtu = mtu;
     params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
     params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
     params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
@@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
     priv->channels.params.num_channels = profile->max_nch(mdev);
 
-    mlx5e_build_rep_params(mdev, &priv->channels.params);
+    mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
     mlx5e_build_rep_netdev(netdev);
 
     mlx5e_timestamp_init(priv);
@@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv)
 
     if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
         netdev_err(priv->netdev,
-               "\tCan't perform loobpack test while device is down\n");
+               "\tCan't perform loopback test while device is down\n");
         return -ENODEV;
     }
 
@@ -1864,7 +1864,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
     }
 
     ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
-    if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+    if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+        ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
         pr_info("can't offload re-write of ip proto %d\n", ip_proto);
         return false;
     }
@@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
         dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                       DMA_TO_DEVICE);
         if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-            return -ENOMEM;
+            goto dma_unmap_wqe_err;
 
         dseg->addr = cpu_to_be64(dma_addr);
         dseg->lkey = sq->mkey_be;
@@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
         dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                         DMA_TO_DEVICE);
         if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-            return -ENOMEM;
+            goto dma_unmap_wqe_err;
 
         dseg->addr = cpu_to_be64(dma_addr);
         dseg->lkey = sq->mkey_be;
@@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
     }
 
     return num_dma;
+
+dma_unmap_wqe_err:
+    mlx5e_dma_unmap_wqe_err(sq, num_dma);
+    return -ENOMEM;
 }
 
 static inline void
@@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
     num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                       (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
     if (unlikely(num_dma < 0))
-        goto dma_unmap_wqe_err;
+        goto err_drop;
 
     mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                  num_bytes, num_dma, wi, cseg);
 
     return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
     sq->stats.dropped++;
-    mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
     dev_kfree_skb_any(skb);
 
     return NETDEV_TX_OK;
@@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
     num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                       (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
     if (unlikely(num_dma < 0))
-        goto dma_unmap_wqe_err;
+        goto err_drop;
 
     mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                  num_bytes, num_dma, wi, cseg);
 
     return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
     sq->stats.dropped++;
-    mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
     dev_kfree_skb_any(skb);
 
     return NETDEV_TX_OK;
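The mlx5e TX change above fixes a leak of partially-mapped buffers: when
mapping segment k fails, the k segments already mapped must be unmapped
before the packet is dropped, rather than returning -ENOMEM directly. A
generic sketch of that pattern (the struct and function names here are
illustrative, not the driver's):

    #include <linux/dma-mapping.h>

    struct seg { void *buf; size_t len; };    /* illustrative */

    static int map_segments(struct device *dev, struct seg *segs, int n,
                            dma_addr_t *addrs)
    {
        int num_dma = 0;

        while (num_dma < n) {
            dma_addr_t a = dma_map_single(dev, segs[num_dma].buf,
                                          segs[num_dma].len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, a))
                goto unmap_err;       /* don't just return -ENOMEM */
            addrs[num_dma++] = a;
        }
        return num_dma;

    unmap_err:
        /* Unwind only what was actually mapped, in reverse order. */
        while (--num_dma >= 0)
            dma_unmap_single(dev, addrs[num_dma], segs[num_dma].len,
                             DMA_TO_DEVICE);
        return -ENOMEM;
    }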
@@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node);
 static void del_sw_hw_rule(struct fs_node *node);
 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
                 struct mlx5_flow_destination *d2);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
 static struct mlx5_flow_rule *
 find_flow_rule(struct fs_fte *fte,
            struct mlx5_flow_destination *dest);
@@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node)
 
     if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
         --fte->dests_size) {
-        modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+        modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+                  BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
         fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
         update_fte = true;
         goto out;
@@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 
 static int init_root_ns(struct mlx5_flow_steering *steering)
 {
+    int err;
+
     steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
     if (!steering->root_ns)
-        goto cleanup;
+        return -ENOMEM;
 
-    if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
-        goto cleanup;
+    err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+    if (err)
+        goto out_err;
 
     set_prio_attrs(steering->root_ns);
-
-    if (create_anchor_flow_table(steering))
-        goto cleanup;
+    err = create_anchor_flow_table(steering);
+    if (err)
+        goto out_err;
 
     return 0;
 
-cleanup:
-    mlx5_cleanup_fs(steering->dev);
-    return -ENOMEM;
+out_err:
+    cleanup_root_ns(steering->root_ns);
+    steering->root_ns = NULL;
+    return err;
 }
 
 static void clean_tree(struct fs_node *node)
@@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
     struct net_device *dev = mlxsw_sp_port->dev;
     int err;
 
-    if (bridge_port->bridge_device->multicast_enabled) {
-        if (bridge_port->bridge_device->multicast_enabled) {
-            err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
-                             false);
-            if (err)
-                netdev_err(dev, "Unable to remove port from SMID\n");
-        }
+    if (bridge_port->bridge_device->multicast_enabled &&
+        !bridge_port->mrouter) {
+        err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
+        if (err)
+            netdev_err(dev, "Unable to remove port from SMID\n");
     }
 
     err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
@@ -183,17 +183,21 @@ static int
 nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
             const struct tc_action *action,
             struct nfp_fl_pre_tunnel *pre_tun,
-            enum nfp_flower_tun_type tun_type)
+            enum nfp_flower_tun_type tun_type,
+            struct net_device *netdev)
 {
     size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
     struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
     u32 tmp_set_ip_tun_type_index = 0;
     /* Currently support one pre-tunnel so index is always 0. */
     int pretun_idx = 0;
+    struct net *net;
 
     if (ip_tun->options_len)
         return -EOPNOTSUPP;
 
+    net = dev_net(netdev);
+
     set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
     set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
@@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
 
     set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
     set_tun->tun_id = ip_tun->key.tun_id;
+    set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
 
     /* Complete pre_tunnel action. */
     pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a,
         *a_len += sizeof(struct nfp_fl_pre_tunnel);
 
         set_tun = (void *)&nfp_fl->action_data[*a_len];
-        err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
+        err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
+                          netdev);
         if (err)
             return err;
         *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);

@@ -190,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun {
     __be16 reserved;
     __be64 tun_id __packed;
     __be32 tun_type_index;
-    __be32 extra[3];
+    __be16 reserved2;
+    u8 ttl;
+    u8 reserved3;
+    __be32 extra[2];
 };
 
 /* Metadata with L2 (1W/4B)

@@ -360,7 +360,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
     }
 
     SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
-    nfp_net_get_mac_addr(app->pf, port);
+    nfp_net_get_mac_addr(app->pf, repr, port);
 
     cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
     err = nfp_repr_init(app, repr,

@@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
     if (err)
         return err < 0 ? err : 0;
 
-    nfp_net_get_mac_addr(app->pf, nn->port);
+    nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port);
 
     return 0;
 }

@@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf);
 int nfp_hwmon_register(struct nfp_pf *pf);
 void nfp_hwmon_unregister(struct nfp_pf *pf);
 
-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
+             struct nfp_port *port);
 
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
Some files were not shown because too many files have changed in this diff.