Merge tag 'net-6.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from bluetooth and bpf.

  Current release - regressions:

   - net: fix skb leak in __skb_tstamp_tx()

   - eth: mtk_eth_soc: fix QoS on DSA MAC on non MTK_NETSYS_V2 SoCs

  Current release - new code bugs:

   - handshake:
      - fix sock->file allocation
      - fix handshake_dup() ref counting

   - bluetooth:
      - fix potential double free caused by hci_conn_unlink
      - fix UAF in hci_conn_hash_flush

  Previous releases - regressions:

   - core: fix stack overflow when LRO is disabled for virtual
     interfaces

   - tls: fix strparser rx issues

   - bpf:
      - fix many sockmap/TCP related issues
      - fix a memory leak in the LRU and LRU_PERCPU hash maps
      - init the offload table earlier

   - eth: mlx5e:
      - do as little as possible in napi poll when budget is 0
      - fix using eswitch mapping in nic mode
      - fix deadlock in tc route query code

  Previous releases - always broken:

   - udplite: fix NULL pointer dereference in __sk_mem_raise_allocated()

   - raw: fix output xfrm lookup wrt protocol

   - smc: reset connection when trying to use SMCRv2 fails

   - phy: mscc: enable VSC8501/2 RGMII RX clock

   - eth: octeontx2-pf: fix TSOv6 offload

   - eth: cdc_ncm: deal with too low values of dwNtbOutMaxSize"
* tag 'net-6.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (79 commits)
udplite: Fix NULL pointer dereference in __sk_mem_raise_allocated().
net: phy: mscc: enable VSC8501/2 RGMII RX clock
net: phy: mscc: remove unnecessary phydev locking
net: phy: mscc: add support for VSC8501
net: phy: mscc: add VSC8502 to MODULE_DEVICE_TABLE
net/handshake: Enable the SNI extension to work properly
net/handshake: Unpin sock->file if a handshake is cancelled
net/handshake: handshake_genl_notify() shouldn't ignore @flags
net/handshake: Fix uninitialized local variable
net/handshake: Fix handshake_dup() ref counting
net/handshake: Remove unneeded check from handshake_dup()
ipv6: Fix out-of-bounds access in ipv6_find_tlv()
net: ethernet: mtk_eth_soc: fix QoS on DSA MAC on non MTK_NETSYS_V2 SoCs
docs: netdev: document the existence of the mail bot
net: fix skb leak in __skb_tstamp_tx()
r8169: Use a raw_spinlock_t for the register locks.
page_pool: fix inconsistency for page_pool_ring_[un]lock()
bpf, sockmap: Test progs verifier error with latest clang
bpf, sockmap: Test FIONREAD returns correct bytes in rx buffer with drops
bpf, sockmap: Test FIONREAD returns correct bytes in rx buffer
...
@@ -68,6 +68,9 @@ attribute-sets:
         type: nest
         nested-attributes: x509
         multi-attr: true
+      -
+        name: peername
+        type: string
   -
     name: done
     attributes:
@@ -105,6 +108,7 @@ operations:
             - auth-mode
             - peer-identity
             - certificate
+            - peername
     -
       name: done
       doc: Handler reports handshake completion

@@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request:
         struct socket   *ta_sock;
         tls_done_func_t ta_done;
         void            *ta_data;
+        const char      *ta_peername;
         unsigned int    ta_timeout_ms;
         key_serial_t    ta_keyring;
         key_serial_t    ta_my_cert;
@@ -71,6 +72,10 @@ instantiated a struct file in sock->file.
 has completed. Further explanation of this function is in the "Handshake
 Completion" section below.
 
+The consumer can provide a NUL-terminated hostname in the @ta_peername
+field that is sent as part of ClientHello. If no peername is provided,
+the DNS hostname associated with the server's IP address is used instead.
+
 The consumer can fill in the @ta_timeout_ms field to force the servicing
 handshake agent to exit after a number of milliseconds. This enables the
 socket to be fully closed once both the kernel and the handshake agent

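As a minimal sketch of how an in-kernel TLS consumer might fill in the new
field: the callback and the key-serial parameters below are hypothetical
placeholders, while tls_client_hello_x509() is the request call this
document describes::

    #include <net/handshake.h>

    /* Completion callback: status is 0 or a negative errno. */
    static void my_done(void *data, int status, key_serial_t peerid)
    {
            /* resume the consumer's state machine */
    }

    static int start_tls(struct socket *sock, key_serial_t keyring,
                         key_serial_t cert)
    {
            struct tls_handshake_args args = {
                    .ta_sock       = sock,
                    .ta_done       = my_done,
                    .ta_data       = sock,
                    .ta_peername   = "server.example.com", /* sent as SNI */
                    .ta_timeout_ms = 3000,
                    .ta_keyring    = keyring,
                    .ta_my_cert    = cert,
            };

            /* Queues the handshake; my_done() runs when it finishes. */
            return tls_client_hello_x509(&args, GFP_KERNEL);
    }
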
@@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above.
 Updating patch status
 ~~~~~~~~~~~~~~~~~~~~~
 
-It may be tempting to help the maintainers and update the state of your
-own patches when you post a new version or spot a bug. Please **do not**
-do that.
-Interfering with the patch status on patchwork will only cause confusion. Leave
-it to the maintainer to figure out what is the most recent and current
-version that should be applied. If there is any doubt, the maintainer
-will reply and ask what should be done.
+Contributors and reviewers do not have the permissions to update patch
+state directly in patchwork. Patchwork doesn't expose much information
+about the history of the state of patches, therefore having multiple
+people update the state leads to confusion.
+
+Instead of delegating patchwork permissions netdev uses a simple mail
+bot which looks for special commands/lines within the emails sent to
+the mailing list. For example, to mark a series as Changes Requested
+one needs to send the following line anywhere in the email thread::
+
+  pw-bot: changes-requested
+
+As a result the bot will set the entire series to Changes Requested.
+This may be useful when the author discovers a bug in their own series
+and wants to prevent it from getting applied.
+
+The use of the bot is entirely optional; if in doubt, ignore its existence
+completely. Maintainers will classify and update the state of the patches
+themselves. No email should ever be sent to the list with the main purpose
+of communicating with the bot; the bot commands should be seen as metadata.
+
+The use of the bot is restricted to authors of the patches (the ``From:``
+header on patch submission and command must match!), maintainers themselves
+and a handful of senior reviewers. The bot records its activity here:
+
+  https://patchwork.hopto.org/pw-bot.html
 
 Review timelines
 ~~~~~~~~~~~~~~~~

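As a usage illustration, the command is just one line anywhere in an
ordinary reply on the patch thread, sent from the same address as the
original submission (the address, subject and message below are invented)::

  From: Jane Developer <jane@example.org>
  Subject: Re: [PATCH net v2 0/3] fix foo teardown

  I found a use-after-free in patch 2; please drop this version.
  A v3 is coming.

  pw-bot: changes-requested
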
@@ -8153,6 +8153,7 @@ F: include/linux/spi/spi-fsl-dspi.h
 
 FREESCALE ENETC ETHERNET DRIVERS
 M:	Claudiu Manoil <claudiu.manoil@nxp.com>
+M:	Vladimir Oltean <vladimir.oltean@nxp.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/freescale/enetc/

@@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
 	hci_free_dev(hdev);
 }
 
-static struct btnxpuart_data w8987_data = {
+static struct btnxpuart_data w8987_data __maybe_unused = {
 	.helper_fw_name = NULL,
 	.fw_name = FIRMWARE_W8987,
 };
 
-static struct btnxpuart_data w8997_data = {
+static struct btnxpuart_data w8997_data __maybe_unused = {
 	.helper_fw_name = FIRMWARE_HELPER,
 	.fw_name = FIRMWARE_W8997,
 };
 
-static const struct of_device_id nxpuart_of_match_table[] = {
+static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
 	{ .compatible = "nxp,88w8987-bt", .data = &w8987_data },
 	{ .compatible = "nxp,88w8997-bt", .data = &w8997_data },
 	{ }

@@ -3947,7 +3947,11 @@ static int bond_slave_netdev_event(unsigned long event,
 		unblock_netpoll_tx();
 		break;
 	case NETDEV_FEAT_CHANGE:
-		bond_compute_features(bond);
+		if (!bond->notifier_ctx) {
+			bond->notifier_ctx = true;
+			bond_compute_features(bond);
+			bond->notifier_ctx = false;
+		}
 		break;
 	case NETDEV_RESEND_IGMP:
 		/* Propagate to master device */
@@ -6342,6 +6346,8 @@ static int bond_init(struct net_device *bond_dev)
 	if (!bond->wq)
 		return -ENOMEM;
 
+	bond->notifier_ctx = false;
+
 	spin_lock_init(&bond->stats_lock);
 	netdev_lockdep_set_classes(bond_dev);
 

@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
 {
 	struct el3_private *lp;
 	struct net_device *dev;
+	int ret;
 
 	dev_dbg(&link->dev, "3c589_attach()\n");
 
@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
 
 	dev->ethtool_ops = &netdev_ethtool_ops;
 
-	return tc589_config(link);
+	ret = tc589_config(link);
+	if (ret)
+		goto err_free_netdev;
+
+	return 0;
+
+err_free_netdev:
+	free_netdev(dev);
+	return ret;
 }
 
 static void tc589_detach(struct pcmcia_device *link)

@@ -3834,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 	txq->tx_skbuff[index] = NULL;
 
+	/* Make sure the updates to rest of the descriptor are performed before
+	 * transferring ownership.
+	 */
+	dma_wmb();
+
 	/* Send it on its way. Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
@@ -3843,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	/* If this was the last BD in the ring, start at the beginning again. */
 	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
 
+	/* Make sure the update to bdp are performed before txq->bd.cur. */
+	dma_wmb();
+
 	txq->bd.cur = bdp;
 
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
 	return 0;
 }
 
@@ -3873,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 		sent_frames++;
 	}
 
-	/* Make sure the update to bdp and tx_skbuff are performed. */
-	wmb();
-
-	/* Trigger transmission start */
-	writel(0, txq->bd.reg_desc_active);
-
 	__netif_tx_unlock(nq);
 
 	return sent_frames;

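The two dma_wmb() calls added above follow the usual descriptor-ring publish
discipline: make every descriptor field visible to the device before flipping
ownership, and make the ownership flip visible before the doorbell write that
lets the device fetch the descriptor. A generic sketch of that pattern, for a
hypothetical ring layout rather than the FEC's buffer descriptors:

    #include <linux/io.h>
    #include <linux/types.h>

    #define DESC_OWN cpu_to_le16(0x8000)	/* device owns the descriptor */

    struct ring_desc {
    	__le64 addr;
    	__le16 len;
    	__le16 flags;
    };

    struct ring {
    	struct ring_desc *desc;		/* coherent DMA ring memory */
    	unsigned int head, size;	/* size is a power of two */
    	void __iomem *doorbell;
    };

    static void ring_publish(struct ring *r, dma_addr_t buf, u16 len)
    {
    	struct ring_desc *d = &r->desc[r->head];

    	d->addr = cpu_to_le64(buf);
    	d->len  = cpu_to_le16(len);

    	/* All descriptor fields must be visible before the device can
    	 * see itself as the owner (first dma_wmb() in the fix).
    	 */
    	dma_wmb();
    	d->flags = DESC_OWN;

    	/* The ownership flip must be visible before the doorbell that
    	 * starts the DMA fetch (second dma_wmb() in the fix).
    	 */
    	dma_wmb();
    	r->head = (r->head + 1) & (r->size - 1);
    	writel(0, r->doorbell);
    }
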
@@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 			htons(ext->lso_sb - skb_network_offset(skb));
 	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
 		ext->lso_format = pfvf->hw.lso_tsov6_idx;
-
-		ipv6_hdr(skb)->payload_len =
-			htons(ext->lso_sb - skb_network_offset(skb));
+		ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
 	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
 		__be16 l3_proto = vlan_get_protocol(skb);
 		struct udphdr *udph = udp_hdr(skb);

@@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev)
 			eth->dsa_meta[i] = md_dst;
 		}
 	} else {
-		/* Hardware special tag parsing needs to be disabled if at least
-		 * one MAC does not use DSA.
+		/* Hardware DSA untagging and VLAN RX offloading need to be
+		 * disabled if at least one MAC does not use DSA.
 		 */
 		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
 
 		val &= ~MTK_CDMP_STAG_EN;
 		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
 
-		val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
-		val &= ~MTK_CDMQ_STAG_EN;
-		mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
-
 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
 	}
 

@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 			   u32 syndrome, int err)
 {
+	const char *namep = mlx5_command_str(opcode);
 	struct mlx5_cmd_stats *stats;
 
-	if (!err)
+	if (!err || !(strcmp(namep, "unknown command opcode")))
 		return;
 
 	stats = &dev->cmd.stats[opcode];

@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
 	/* ensure cq space is freed before enabling more cqes */
 	wmb();
 
+	mlx5e_txqsq_wake(&ptpsq->txqsq);
+
 	return work_done == budget;
 }
 

@@ -1369,11 +1369,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 
 	list_for_each_entry(flow, encap_flows, tmp_list) {
-		struct mlx5_flow_attr *attr = flow->attr;
 		struct mlx5_esw_flow_attr *esw_attr;
+		struct mlx5_flow_attr *attr;
 
 		if (!mlx5e_is_offloaded_flow(flow))
 			continue;
+
+		attr = mlx5e_tc_get_encap_attr(flow);
 		esw_attr = attr->esw_attr;
 
 		if (flow_flag_test(flow, SLOW))

@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
 	return pi;
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
 static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
 	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);

@@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 {
 	struct mlx5e_priv *out_priv, *route_priv;
-	struct mlx5_devcom *devcom = NULL;
 	struct mlx5_core_dev *route_mdev;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
-	int err;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 
 	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
 	if (mlx5_lag_is_active(out_priv->mdev)) {
+		struct mlx5_devcom *devcom;
+		int err;
+
 		/* In lag case we may get devices from different eswitch instances.
 		 * If we failed to get vport num, it means, mostly, that we on the wrong
 		 * eswitch.
@@ -1686,16 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 		if (err != -ENOENT)
 			return err;
 
+		rcu_read_lock();
 		devcom = out_priv->mdev->priv.devcom;
-		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		if (!esw)
-			return -ENODEV;
+		esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+		rcu_read_unlock();
+
+		return err;
 	}
 
-	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
-	if (devcom)
-		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	return err;
+	return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 }
 
 static int
@@ -5301,6 +5302,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
 		goto err_action_counter;
 	}
 
+	mlx5_esw_offloads_devcom_init(esw);
+
 	return 0;
 
 err_action_counter:
@@ -5329,7 +5332,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
 	priv = netdev_priv(rpriv->netdev);
 	esw = priv->mdev->priv.eswitch;
 
-	mlx5e_tc_clean_fdb_peer_flows(esw);
+	mlx5_esw_offloads_devcom_cleanup(esw);
 
 	mlx5e_tc_tun_cleanup(uplink_priv->encap);
 
@@ -5643,22 +5646,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
 				   0, NULL);
 }
 
+static struct mapping_ctx *
+mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tc_table *tc;
+	struct mlx5_eswitch *esw;
+	struct mapping_ctx *ctx;
+
+	if (is_mdev_switchdev_mode(priv->mdev)) {
+		esw = priv->mdev->priv.eswitch;
+		ctx = esw->offloads.reg_c0_obj_pool;
+	} else {
+		tc = mlx5e_fs_get_tc(priv->fs);
+		ctx = tc->mapping;
+	}
+
+	return ctx;
+}
+
 int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
 				     u64 act_miss_cookie, u32 *act_miss_mapping)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_mapped_obj mapped_obj = {};
+	struct mlx5_eswitch *esw;
 	struct mapping_ctx *ctx;
 	int err;
 
-	ctx = esw->offloads.reg_c0_obj_pool;
-
+	ctx = mlx5e_get_priv_obj_mapping(priv);
 	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
 	mapped_obj.act_miss_cookie = act_miss_cookie;
 	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
 	if (err)
 		return err;
 
+	if (!is_mdev_switchdev_mode(priv->mdev))
+		return 0;
+
+	esw = priv->mdev->priv.eswitch;
 	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
 	if (IS_ERR(attr->act_id_restore_rule))
 		goto err_rule;
@@ -5673,10 +5697,9 @@ err_rule:
 void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
 				      u32 act_miss_mapping)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mapping_ctx *ctx;
+	struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
 
-	ctx = esw->offloads.reg_c0_obj_pool;
-	mlx5_del_flow_rules(attr->act_id_restore_rule);
+	if (is_mdev_switchdev_mode(priv->mdev))
+		mlx5_del_flow_rules(attr->act_id_restore_rule);
 	mapping_remove(ctx, act_miss_mapping);
 }

@@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
 	}
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+{
+	if (netif_tx_queue_stopped(sq->txq) &&
+	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+	    mlx5e_ptpsq_fifo_has_room(sq) &&
+	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+		netif_tx_wake_queue(sq->txq);
+		sq->stats->wake++;
+	}
+}
+
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_sq_stats *stats;
@@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
-	if (netif_tx_queue_stopped(sq->txq) &&
-	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
-	    mlx5e_ptpsq_fifo_has_room(sq) &&
-	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
-		netif_tx_wake_queue(sq->txq);
-		stats->wake++;
-	}
+	mlx5e_txqsq_wake(sq);
 
 	return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }

@@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
+	/* budget=0 means we may be in IRQ context, do as little as possible */
+	if (unlikely(!budget))
+		goto out;
+
 	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
 
 	if (c->xdp)
 		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
 
-	if (likely(budget)) { /* budget=0 means: don't poll rx rings */
-		if (xsk_open)
-			work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+	if (xsk_open)
+		work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
 
-		if (likely(budget - work_done))
-			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+	if (likely(budget - work_done))
+		work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
 
-		busy |= work_done == budget;
-	}
+	busy |= work_done == budget;
 
 	mlx5e_poll_ico_cq(&c->icosq.cq);
 	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))

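The early return added above encodes a general NAPI contract: a budget of 0
(as used by netpoll from IRQ context) permits TX completion work only, and
the handler must neither touch RX rings nor call napi_complete_done(). A
sketch of that rule for a hypothetical driver (the example_* names are
invented, not mlx5 code):

    #include <linux/netdevice.h>

    struct example_channel {
    	struct napi_struct napi;
    	/* ring state elided */
    };

    static void example_clean_tx(struct example_channel *ch)
    {
    	/* reclaim completed TX descriptors */
    }

    static int example_clean_rx(struct example_channel *ch, int budget)
    {
    	return 0; /* number of RX packets processed, up to budget */
    }

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
    	struct example_channel *ch =
    		container_of(napi, struct example_channel, napi);
    	int work_done;

    	example_clean_tx(ch);	/* TX completions: always allowed */

    	if (unlikely(!budget))	/* IRQ context: do as little as possible */
    		return 0;

    	work_done = example_clean_rx(ch, budget);
    	if (work_done < budget)
    		napi_complete_done(napi, work_done);

    	return work_done;
    }
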
@@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 
 	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
-	mlx5_irq_table_destroy(dev);
+	mlx5_irq_table_free_irqs(dev);
 	mutex_unlock(&table->lock);
 }
 

@@ -342,6 +342,7 @@ struct mlx5_eswitch {
 		u32             large_group_num;
 	}  params;
 	struct blocking_notifier_head n_head;
+	bool paired[MLX5_MAX_PORTS];
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }

@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
 
+		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+			break;
+
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
 		if (err)
 			goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		if (err)
 			goto err_pair;
 
+		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
 		break;
 
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
-		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
 			break;
 
 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
 		mlx5_esw_offloads_unpair(peer_esw);
 		mlx5_esw_offloads_unpair(esw);
 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2779,7 +2786,7 @@ err_out:
 	return err;
 }
 
-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 
@@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 					 ESW_OFFLOADS_DEVCOM_PAIR, esw);
 }
 
-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 
@@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	if (err)
 		goto err_vports;
 
-	esw_offloads_devcom_init(esw);
-
 	return 0;
 
 err_vports:
@@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 
 void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
-	esw_offloads_devcom_cleanup(esw);
 	mlx5_eswitch_disable_pf_vf_vports(esw);
 	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 	esw_set_passing_vport_metadata(esw, false);

Some files were not shown because too many files have changed in this diff.