Merge tag 'net-6.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Including fixes from BPF and big collection of fixes for WiFi core and
drivers.
Current release - regressions:
- vxlan: fix regression when dropping packets due to invalid src
addresses
- bpf: fix a potential use-after-free in bpf_link_free()
- xdp: revert support for redirect to any xsk socket bound to the
same UMEM as it can result in a corruption
- virtio_net:
- add missing lock protection when reading return code from
control_buf
- fix false-positive lockdep splat in DIM
- Revert "wifi: wilc1000: convert list management to RCU"
- wifi: ath11k: fix error path in ath11k_pcic_ext_irq_config
Previous releases - regressions:
- rtnetlink: make the "split" NLM_DONE handling generic, restore the
old behavior for two cases where we started coalescing those
messages with normal messages, breaking sloppily-coded userspace
- wifi:
- cfg80211: validate HE operation element parsing
- cfg80211: fix 6 GHz scan request building
- mt76: mt7615: add missing chanctx ops
- ath11k: move power type check to ASSOC stage, fix connecting to
6 GHz AP
- ath11k: fix WCN6750 firmware crash caused by 17 num_vdevs
- rtlwifi: ignore IEEE80211_CONF_CHANGE_RETRY_LIMITS
- iwlwifi: mvm: fix a crash on 7265
Previous releases - always broken:
- ncsi: prevent multi-threaded channel probing, a spec violation
- vmxnet3: disable rx data ring on dma allocation failure
- ethtool: init tsinfo stats if requested, prevent unintentionally
reporting all-zero stats on devices which don't implement any
- dst_cache: fix possible races in less common IPv6 features
- tcp: auth: don't consider TCP_CLOSE to be in TCP_AO_ESTABLISHED
- ax25: fix two refcounting bugs
- eth: ionic: fix kernel panic in XDP_TX action
Misc:
- tcp: count CLOSE-WAIT sockets for TCP_MIB_CURRESTAB"
* tag 'net-6.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (107 commits)
selftests: net: lib: set 'i' as local
selftests: net: lib: avoid error removing empty netns name
selftests: net: lib: support errexit with busywait
net: ethtool: fix the error condition in ethtool_get_phy_stats_ethtool()
ipv6: fix possible race in __fib6_drop_pcpu_from()
af_unix: Annotate data-race of sk->sk_shutdown in sk_diag_fill().
af_unix: Use skb_queue_len_lockless() in sk_diag_show_rqlen().
af_unix: Use skb_queue_empty_lockless() in unix_release_sock().
af_unix: Use unix_recvq_full_lockless() in unix_stream_connect().
af_unix: Annotate data-race of net->unx.sysctl_max_dgram_qlen.
af_unix: Annotate data-races around sk->sk_sndbuf.
af_unix: Annotate data-races around sk->sk_state in UNIX_DIAG.
af_unix: Annotate data-race of sk->sk_state in unix_stream_read_skb().
af_unix: Annotate data-races around sk->sk_state in sendmsg() and recvmsg().
af_unix: Annotate data-race of sk->sk_state in unix_accept().
af_unix: Annotate data-race of sk->sk_state in unix_stream_connect().
af_unix: Annotate data-races around sk->sk_state in unix_write_space() and poll().
af_unix: Annotate data-race of sk->sk_state in unix_inq_len().
af_unix: Annodate data-races around sk->sk_state for writers.
af_unix: Set sk->sk_state under unix_state_lock() for truly disconencted peer.
...
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
 sxdp_shared_umem_fd field as you registered the UMEM on that
 socket. These two sockets will now share one and the same UMEM.
 
-In this case, it is possible to use the NIC's packet steering
-capabilities to steer the packets to the right queue. This is not
-possible in the previous example as there is only one queue shared
-among sockets, so the NIC cannot do this steering as it can only steer
-between queues.
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
 
-In libxdp (or libbpf prior to version 1.0), you need to use the
-xsk_socket__create_shared() API as it takes a reference to a FILL ring
-and a COMPLETION ring that will be created for you and bound to the
-shared UMEM. You can use this function for all the sockets you create,
-or you can use it for the second and following ones and use
-xsk_socket__create() for the first one. Both methods yield the same
-result.
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
 
 Note that a UMEM can be shared between sockets on the same queue id
 and device, as well as between queues on the same device and between
-devices at the same time. It is also possible to redirect to any
-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
+devices at the same time.
 
 XDP_USE_NEED_WAKEUP bind flag
 -----------------------------
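The documentation above describes the shared-UMEM contract in prose; here is
a minimal user-space sketch of that flow. This is our illustration, not part
of the diff: it assumes the libxdp xsk.h API (xsk_umem__create(),
xsk_socket__create(), xsk_socket__create_shared()), and the interface name
"eth0", queue ids 0/1, and the sizing constants are placeholders.

    #include <stdlib.h>
    #include <unistd.h>
    #include <xdp/xsk.h>

    #define NUM_FRAMES 4096
    #define FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE

    /* The rings must outlive the sockets, so keep them out of the
     * function's stack frame. */
    static struct xsk_ring_prod fq0, fq1, tx0, tx1;
    static struct xsk_ring_cons cq0, cq1, rx0, rx1;

    static int shared_umem_pair(struct xsk_socket **xsk0,
                                struct xsk_socket **xsk1)
    {
            struct xsk_umem *umem;
            void *bufs;
            int err;

            /* UMEM memory must be page aligned */
            if (posix_memalign(&bufs, getpagesize(),
                               NUM_FRAMES * FRAME_SIZE))
                    return -1;

            err = xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
                                   &fq0, &cq0, NULL);
            if (err)
                    return err;

            /* First socket, queue 0: reuses the FILL/COMPLETION rings
             * registered with the UMEM above. */
            err = xsk_socket__create(xsk0, "eth0", 0, umem,
                                     &rx0, &tx0, NULL);
            if (err)
                    return err;

            /* Second socket, queue 1: create_shared() binds its own FILL
             * and COMPLETION rings to the same UMEM (XDP_SHARED_UMEM). */
            return xsk_socket__create_shared(xsk1, "eth0", 1, umem,
                                             &rx1, &tx1, &fq1, &cq1, NULL);
    }

As the restored text notes, calling xsk_socket__create_shared() for every
socket, or plain xsk_socket__create() for the first one only, yields the
same result.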
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
    switch, or other distribution mechanism, in your NIC to direct
    traffic to the correct queue id and socket.
 
-   Note that if you are using the XDP_SHARED_UMEM option, it is
-   possible to switch traffic between any socket bound to the same
-   umem.
-
 Q: My packets are sometimes corrupted. What is wrong?
 
 A: Care has to be taken not to feed the same buffer in the UMEM into
@@ -15237,7 +15237,6 @@ F: drivers/staging/most/
 F:	include/linux/most.h
 
 MOTORCOMM PHY DRIVER
-M:	Peter Geis <pgwipeout@gmail.com>
 M:	Frank <Frank.Sae@motor-comm.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -409,7 +409,6 @@ struct ice_vsi {
 	struct ice_tc_cfg tc_cfg;
 	struct bpf_prog *xdp_prog;
 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
-	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
 }
 
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+							u16 qid)
+{
+	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+	if (!ice_is_xdp_ena_vsi(vsi))
+		return NULL;
+
+	return (pool && pool->dev) ? pool : NULL;
+}
+
 /**
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 	struct ice_vsi *vsi = ring->vsi;
 	u16 qid = ring->q_index;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-		return NULL;
-
-	return xsk_get_pool_from_qid(vsi->netdev, qid);
+	return ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 	if (!ring)
 		return;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
-		ring->xsk_pool = NULL;
-		return;
-	}
-
-	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+	ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
+	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
+};
+
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	     u32 flags);
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		}
 		rx_rings_rem -= rx_rings_per_v;
 	}
+
+	if (ice_is_xdp_ena_vsi(vsi))
+		ice_map_xdp_rings(vsi);
 }
 
 /**
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	if (!vsi->q_vectors)
 		goto err_vectors;
 
-	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
-	if (!vsi->af_xdp_zc_qps)
-		goto err_zc_qps;
-
 	return 0;
 
-err_zc_qps:
-	devm_kfree(dev, vsi->q_vectors);
 err_vectors:
 	devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
 	dev = ice_pf_to_dev(pf);
 
-	bitmap_free(vsi->af_xdp_zc_qps);
-	vsi->af_xdp_zc_qps = NULL;
 	/* free the ring and vector containers */
 	devm_kfree(dev, vsi->q_vectors);
 	vsi->q_vectors = NULL;
@@ -2282,6 +2274,16 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 	if (ret)
 		goto unroll_vector_base;
 
+	if (ice_is_xdp_ena_vsi(vsi)) {
+		ret = ice_vsi_determine_xdp_res(vsi);
+		if (ret)
+			goto unroll_vector_base;
+		ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+					    ICE_XDP_CFG_PART);
+		if (ret)
+			goto unroll_vector_base;
+	}
+
 	ice_vsi_map_rings_to_vectors(vsi);
 
 	/* Associate q_vector rings to napi */
@@ -2289,15 +2291,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 
 	vsi->stat_offsets_loaded = false;
 
-	if (ice_is_xdp_ena_vsi(vsi)) {
-		ret = ice_vsi_determine_xdp_res(vsi);
-		if (ret)
-			goto unroll_vector_base;
-		ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
-		if (ret)
-			goto unroll_vector_base;
-	}
-
 	/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
 	if (vsi->type != ICE_VSI_CTRL)
 		/* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
 	/* return value check can be skipped here, it always returns
 	 * 0 if reset is in progress
 	 */
-	ice_destroy_xdp_rings(vsi);
+	ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
 
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_q_vectors(vsi);
@@ -2707,48 +2707,33 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
 		bpf_prog_put(old_prog);
 }
 
-/**
- * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
- * @vsi: VSI to bring up Tx rings used by XDP
- * @prog: bpf program that will be assigned to VSI
- *
- * Return 0 on success and negative value on error
- */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
 {
-	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
-	struct ice_pf *pf = vsi->back;
-	struct ice_qs_cfg xdp_qs_cfg = {
-		.qs_mutex = &pf->avail_q_mutex,
-		.pf_map = pf->avail_txqs,
-		.pf_map_size = pf->max_pf_txqs,
-		.q_count = vsi->num_xdp_txq,
-		.scatter_count = ICE_MAX_SCATTER_TXQS,
-		.vsi_map = vsi->txq_map,
-		.vsi_map_offset = vsi->alloc_txq,
-		.mapping_mode = ICE_VSI_MAP_CONTIG
-	};
-	struct device *dev;
-	int i, v_idx;
-	int status;
-
-	dev = ice_pf_to_dev(pf);
-	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
-				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
-	if (!vsi->xdp_rings)
-		return -ENOMEM;
-
-	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
-	if (__ice_vsi_get_qs(&xdp_qs_cfg))
-		goto err_map_xdp;
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
 
 	if (static_key_enabled(&ice_xdp_locking_key))
-		netdev_warn(vsi->netdev,
-			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
 
-	if (ice_xdp_alloc_setup_rings(vsi))
-		goto clear_xdp_rings;
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
 
 	/* follow the logic from ice_vsi_map_rings_to_vectors */
 	ice_for_each_q_vector(vsi, v_idx) {
@@ -2769,30 +2754,65 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 		xdp_rings_rem -= xdp_rings_per_v;
 	}
 
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
 	}
+}
 
+/**
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
+ *
+ * Return 0 on success and negative value on error
+ */
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type)
+{
+	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_pf *pf = vsi->back;
+	struct ice_qs_cfg xdp_qs_cfg = {
+		.qs_mutex = &pf->avail_q_mutex,
+		.pf_map = pf->avail_txqs,
+		.pf_map_size = pf->max_pf_txqs,
+		.q_count = vsi->num_xdp_txq,
+		.scatter_count = ICE_MAX_SCATTER_TXQS,
+		.vsi_map = vsi->txq_map,
+		.vsi_map_offset = vsi->alloc_txq,
+		.mapping_mode = ICE_VSI_MAP_CONTIG
+	};
+	struct device *dev;
+	int status, i;
+
+	dev = ice_pf_to_dev(pf);
+	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
+				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
+	if (!vsi->xdp_rings)
+		return -ENOMEM;
+
+	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
+	if (__ice_vsi_get_qs(&xdp_qs_cfg))
+		goto err_map_xdp;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		netdev_warn(vsi->netdev,
+			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
+	if (ice_xdp_alloc_setup_rings(vsi))
+		goto clear_xdp_rings;
+
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
 	 */
-	if (ice_is_reset_in_progress(pf->state))
+	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
+	ice_map_xdp_rings(vsi);
+
 	/* tell the Tx scheduler that right now we have
 	 * additional queues
 	 */
@@ -2842,22 +2862,21 @@ err_map_xdp:
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
 	int i, v_idx;
 
 	/* q_vectors are freed in reset path so there's no point in detaching
-	 * rings; in case of rebuild being triggered not from reset bits
-	 * in pf->state won't be set, so additionally check first q_vector
-	 * against NULL
+	 * rings
 	 */
-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+	if (cfg_type == ICE_XDP_CFG_PART)
 		goto free_qmap;
 
 	ice_for_each_q_vector(vsi, v_idx) {

@@ -2898,7 +2917,7 @@ free_qmap:
 	if (static_key_enabled(&ice_xdp_locking_key))
 		static_branch_dec(&ice_xdp_locking_key);
 
-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
 	ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3028,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
 		} else {
-			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+							     ICE_XDP_CFG_FULL);
 			if (xdp_ring_err)
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
 		}

@@ -3020,7 +3040,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
 		xdp_features_clear_redirect_target(vsi->netdev);
-		xdp_ring_err = ice_destroy_xdp_rings(vsi);
+		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
 		/* reallocate Rx queues that were used for zero-copy */
@@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
  *
  * Read the specified word from the copy of the Shadow RAM found in the
  * specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
  */
 static int
 ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
 {
-	return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+	u32 sr_copy;
+
+	switch (bank) {
+	case ICE_ACTIVE_FLASH_BANK:
+		sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+		break;
+	case ICE_INACTIVE_FLASH_BANK:
+		sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+		break;
+	}
+
+	return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
 }
 
 /**
@@ -440,8 +454,7 @@ int
 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
 		       u16 module_type)
 {
-	u16 pfa_len, pfa_ptr;
-	u16 next_tlv;
+	u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
 	int status;
 
 	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -454,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
 		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
 		return status;
 	}
 
+	/* The Preserved Fields Area contains a sequence of Type-Length-Value
+	 * structures which define its contents. The PFA length includes all
+	 * of the TLVs, plus the initial length word itself, *and* one final
+	 * word at the end after all of the TLVs.
+	 */
+	if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+		dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+			 pfa_ptr, pfa_len);
+		return -EINVAL;
+	}
+
 	/* Starting with first TLV after PFA length, iterate through the list
 	 * of TLVs to find the requested one.
 	 */
 	next_tlv = pfa_ptr + 1;
-	while (next_tlv < pfa_ptr + pfa_len) {
+	while (next_tlv < max_tlv) {
 		u16 tlv_sub_module_type;
 		u16 tlv_len;
@@ -482,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
 			}
 			return -EINVAL;
 		}
-		/* Check next TLV, i.e. current TLV pointer + length + 2 words
-		 * (for current TLV's type and length)
-		 */
-		next_tlv = next_tlv + tlv_len + 2;
+
+		if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+		    check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+			dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+				 tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+			return -EINVAL;
+		}
 	}
 	/* Module does not exist */
 	return -ENOENT;
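Why these guards matter: pfa_ptr, pfa_len, and the TLV cursor are all 16-bit
word offsets, so the old "next_tlv + tlv_len + 2" could wrap around 0xFFFF on
a corrupted flash image and loop forever. A hedged user-space sketch of the
same walk, using GCC/Clang's __builtin_add_overflow() (which is what the
kernel's check_add_overflow() wraps); read_word() and the TLV layout here are
illustrative assumptions, not the driver's API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the flash accessor; assumed to be provided. */
    extern uint16_t read_word(uint16_t offset);

    static bool find_tlv(uint16_t pfa_ptr, uint16_t pfa_len,
                         uint16_t wanted, uint16_t *where)
    {
            uint16_t next_tlv = pfa_ptr + 1, max_tlv;

            /* pfa_len counts the length word itself plus one trailing
             * word, hence the "- 1" when computing the last valid slot. */
            if (__builtin_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv))
                    return false;   /* corrupted: PFA extent wraps */

            while (next_tlv < max_tlv) {
                    uint16_t type = read_word(next_tlv);
                    uint16_t len = read_word(next_tlv + 1);

                    if (type == wanted) {
                            *where = next_tlv;
                            return true;
                    }
                    /* advance by 2-word header + value, checking each add */
                    if (__builtin_add_overflow(next_tlv, 2, &next_tlv) ||
                        __builtin_add_overflow(next_tlv, len, &next_tlv))
                            return false;   /* corrupted TLV length */
            }
            return false;
    }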
@@ -1009,6 +1037,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
 	return 0;
 }
 
+/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+			u32 *hdr_len)
+{
+	u16 hdr_len_l, hdr_len_h;
+	u32 hdr_len_dword;
+	int status;
+
+	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+				     &hdr_len_l);
+	if (status)
+		return status;
+
+	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+				     &hdr_len_h);
+	if (status)
+		return status;
+
+	/* CSS header length is in DWORD, so convert to words and add
+	 * authentication header size
+	 */
+	hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+	*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+	return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+	struct ice_bank_info *banks = &hw->flash.banks;
+	int status;
+
+	status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+					 &banks->active_css_hdr_len);
+	if (status)
+		return status;
+
+	status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+					 &banks->inactive_css_hdr_len);
+	if (status)
+		return status;
+
+	return 0;
+}
+
 /**
  * ice_init_nvm - initializes NVM setting
  * @hw: pointer to the HW struct
@@ -1055,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
 		return status;
 	}
 
+	status = ice_determine_css_hdr_len(hw);
+	if (status) {
+		ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+		return status;
+	}
+
 	status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
 	if (status) {
 		ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
@@ -482,6 +482,8 @@ struct ice_bank_info {
 	u32 orom_size;				/* Size of OROM bank */
 	u32 netlist_ptr;			/* Pointer to 1st Netlist bank */
 	u32 netlist_size;			/* Size of Netlist bank */
+	u32 active_css_hdr_len;			/* Active CSS header length */
+	u32 inactive_css_hdr_len;		/* Inactive CSS header length */
 	enum ice_flash_bank nvm_bank;		/* Active NVM bank */
 	enum ice_flash_bank orom_bank;		/* Active OROM bank */
 	enum ice_flash_bank netlist_bank;	/* Active Netlist bank */
@@ -1087,17 +1089,13 @@
 #define ICE_SR_SECTOR_SIZE_IN_WORDS	0x800
 
 /* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L	0x02
+#define ICE_NVM_CSS_HDR_LEN_H	0x03
 #define ICE_NVM_CSS_SREV_L	0x14
 #define ICE_NVM_CSS_SREV_H	0x15
 
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH	330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET	roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH	(2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN	0x08
 
 /* The Link Topology Netlist section is stored as a series of words. It is
  * stored in the NVM as a TLV, with the first two words containing the type
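A worked check of the conversion (ours, not from the patch), using only the
constants visible in the hunks above: the removed ICE_CSS_HEADER_LENGTH
assumed a fixed 330-word (165-DWORD) CSS header, and for such devices
ice_get_nvm_css_hdr_len() lands on the same Shadow RAM copy offset:

    /* hdr_len_dword read from CSS words 0x02/0x03: 165 DWORDs */
    hdr_len = (165 * 2) + ICE_NVM_AUTH_HEADER_LEN;	/* 330 + 8 = 338 words */
    sr_copy = roundup(338, 32);				/* = 352 words */
    /* old fixed offset: roundup(ICE_CSS_HEADER_LENGTH, 32)
     * = roundup(330, 32) = 352 words - identical for the standard header,
     * while devices with a longer signed section now get a correctly
     * larger offset instead of reading from inside the CSS header.
     */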
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 	if (!pool)
 		return -EINVAL;
 
-	clear_bit(qid, vsi->af_xdp_zc_qps);
 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
 	return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	if (err)
 		return err;
 
-	set_bit(qid, vsi->af_xdp_zc_qps);
-
 	return 0;
 }
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
 {
 	struct ice_rx_ring *rx_ring;
-	unsigned long q;
+	uint i;
+
+	ice_for_each_rxq(vsi, i) {
+		rx_ring = vsi->rx_rings[i];
+		if (!rx_ring->xsk_pool)
+			continue;
 
-	for_each_set_bit(q, vsi->af_xdp_zc_qps,
-			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
-		rx_ring = vsi->rx_rings[q];
 		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
 			return -ENOMEM;
 	}
@@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
 	struct igc_hw *hw = &adapter->hw;
 	u32 eeer;
 
+	linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			 edata->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			 edata->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+			 edata->supported);
+
 	if (hw->dev_spec._base.eee_enable)
 		mii_eee_cap1_mod_linkmode_t(edata->advertised,
 					    adapter->eee_advert);
 
-	*edata = adapter->eee;
-
 	eeer = rd32(IGC_EEER);
 
 	/* EEE status on negotiated link */
@@ -12,6 +12,7 @@
 #include <linux/bpf_trace.h>
 #include <net/xdp_sock_drv.h>
 #include <linux/pci.h>
+#include <linux/mdio.h>
 
 #include <net/ipv6.h>
@@ -4975,6 +4976,9 @@ void igc_up(struct igc_adapter *adapter)
 	/* start the watchdog. */
 	hw->mac.get_link_status = true;
 	schedule_work(&adapter->watchdog_task);
+
+	adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
+			      MDIO_EEE_2_5GT;
 }
 
 /**
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
 	 * - when available free entries are less.
	 * Lower priority ones out of avaialble free entries are always
	 * chosen when 'high vs low' question arises.
+	 *
+	 * For a VF base MCAM match rule is set by its PF. And all the
+	 * further MCAM rules installed by VF on its own are
+	 * concatenated with the base rule set by its PF. Hence PF entries
+	 * should be at lower priority compared to VF entries. Otherwise
+	 * base rule is hit always and rules installed by VF will be of
+	 * no use. Hence if the request is from PF then allocate low
+	 * priority entries.
	 */
+	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+		goto lprio_alloc;
 
 	/* Get the search range for priority allocation request */
 	if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
 		goto alloc;
 	}
 
-	/* For a VF base MCAM match rule is set by its PF. And all the
-	 * further MCAM rules installed by VF on its own are
-	 * concatenated with the base rule set by its PF. Hence PF entries
-	 * should be at lower priority compared to VF entries. Otherwise
-	 * base rule is hit always and rules installed by VF will be of
-	 * no use. Hence if the request is from PF and NOT a priority
-	 * allocation request then allocate low priority entries.
-	 */
-	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
-		goto lprio_alloc;
-
 	/* Find out the search range for non-priority allocation request
 	 *
 	 * Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
 		reverse = true;
 		start = 0;
 		end = mcam->bmap_entries;
+		/* Ensure PF requests are always at bottom and if PF requests
+		 * for higher/lower priority entry wrt reference entry then
+		 * honour that criteria and start search for entries from bottom
+		 * and not in mid zone.
+		 */
+		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+		    req->priority == NPC_MCAM_HIGHER_PRIO)
+			end = req->ref_entry;
+
+		if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+		    req->priority == NPC_MCAM_LOWER_PRIO)
+			start = req->ref_entry;
 	}
 
 alloc:
@@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
 	const struct mtk_soc_data *soc = eth->soc;
 	dma_addr_t phy_ring_tail;
-	int cnt = MTK_QDMA_RING_SIZE;
+	int cnt = soc->tx.fq_dma_size;
 	dma_addr_t dma_addr;
-	int i;
+	int i, j, len;
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
 		eth->scratch_ring = eth->sram_base;
@@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 					       cnt * soc->tx.desc_size,
 					       &eth->phy_scratch_ring,
 					       GFP_KERNEL);
 
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
-	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
-	if (unlikely(!eth->scratch_head))
-		return -ENOMEM;
-
-	dma_addr = dma_map_single(eth->dma_dev,
-				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
-				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-		return -ENOMEM;
-
 	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
 
-	for (i = 0; i < cnt; i++) {
-		dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-		struct mtk_tx_dma_v2 *txd;
+	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
 
-		txd = eth->scratch_ring + i * soc->tx.desc_size;
-		txd->txd1 = addr;
-		if (i < cnt - 1)
-			txd->txd2 = eth->phy_scratch_ring +
-				    (i + 1) * soc->tx.desc_size;
+		if (unlikely(!eth->scratch_head[j]))
+			return -ENOMEM;
 
-		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-		if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
-			txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
-		txd->txd4 = 0;
-		if (mtk_is_netsys_v2_or_greater(eth)) {
-			txd->txd5 = 0;
-			txd->txd6 = 0;
-			txd->txd7 = 0;
-			txd->txd8 = 0;
+		dma_addr = dma_map_single(eth->dma_dev,
+					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+					  DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+			return -ENOMEM;
+
+		for (i = 0; i < cnt; i++) {
+			struct mtk_tx_dma_v2 *txd;
+
+			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+				txd->txd2 = eth->phy_scratch_ring +
+					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+			txd->txd4 = 0;
+			if (mtk_is_netsys_v2_or_greater(eth)) {
+				txd->txd5 = 0;
+				txd->txd6 = 0;
+				txd->txd7 = 0;
+				txd->txd8 = 0;
+			}
 		}
 	}
@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
-	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
-		ring_size = MTK_QDMA_RING_SIZE;
-	else
-		ring_size = MTK_DMA_SIZE;
+	ring_size = soc->tx.dma_size;
 
 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
 			    GFP_KERNEL);
@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 		goto no_tx_mem;
 
 	if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
-		ring->dma = eth->sram_base + ring_size * sz;
-		ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+		ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+		ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
 	} else {
 		ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
 					       &ring->phys, GFP_KERNEL);
@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_rx_ring *ring;
 	int rx_data_len, rx_dma_size, tx_ring_size;
 	int i;
@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
-		tx_ring_size = MTK_QDMA_RING_SIZE;
-	else
-		tx_ring_size = MTK_DMA_SIZE;
+	tx_ring_size = soc->tx.dma_size;
 
 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
 		if (ring_no)
@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
 	} else {
 		rx_data_len = ETH_DATA_LEN;
-		rx_dma_size = MTK_DMA_SIZE;
+		rx_dma_size = soc->rx.dma_size;
 	}
 
 	ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
 	}
 
-	kfree(eth->scratch_head);
+	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+		kfree(eth->scratch_head[i]);
+		eth->scratch_head[i] = NULL;
+	}
 }
 
 static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},

@@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},

@@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},

@@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},

@@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},

@@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = {
 		.desc_size = sizeof(struct mtk_tx_dma_v2),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),

@@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };

@@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = {
 		.desc_size = sizeof(struct mtk_tx_dma_v2),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),

@@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };

@@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = {
 		.desc_size = sizeof(struct mtk_tx_dma_v2),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(4K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma_v2),

@@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };

@@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),

@@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };
@@ -32,7 +32,9 @@
 #define MTK_TX_DMA_BUF_LEN	0x3fff
 #define MTK_TX_DMA_BUF_LEN_V2	0xffff
 #define MTK_QDMA_RING_SIZE	2048
-#define MTK_DMA_SIZE		512
+#define MTK_DMA_SIZE(x)		(SZ_##x)
+#define MTK_FQ_DMA_HEAD		32
+#define MTK_FQ_DMA_LENGTH	2048
 #define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
 #define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
 #define MTK_DMA_DUMMY_DESC	0xffffffff
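A short note on the reworked macro (our reading, not part of the patch):
MTK_DMA_SIZE(x) token-pastes its argument onto the kernel's SZ_* constants
from linux/sizes.h, so per-SoC ring sizes are spelled symbolically:

    #define MTK_DMA_SIZE(x)	(SZ_##x)

    MTK_DMA_SIZE(2K)	/* expands to (SZ_2K) == 2048 descriptors */
    MTK_DMA_SIZE(4K)	/* expands to (SZ_4K) == 4096, mt7988's fq size */

MTK_FQ_DMA_HEAD (32) bounds the new scratch_head[] chunk array, with each
chunk covering up to MTK_FQ_DMA_LENGTH (2048) descriptors.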
@@ -1176,6 +1178,8 @@ struct mtk_soc_data {
 		u32 desc_size;
 		u32 dma_max_len;
 		u32 dma_len_offset;
+		u32 dma_size;
+		u32 fq_dma_size;
 	} tx;
 	struct {
 		u32 desc_size;

@@ -1183,6 +1187,7 @@ struct mtk_soc_data {
 		u32 dma_l4_valid;
 		u32 dma_max_len;
 		u32 dma_len_offset;
+		u32 dma_size;
 	} rx;
 };

@@ -1264,7 +1269,7 @@ struct mtk_eth {
 	struct napi_struct rx_napi;
 	void *scratch_ring;
 	dma_addr_t phy_scratch_ring;
-	void *scratch_head;
+	void *scratch_head[MTK_FQ_DMA_HEAD];
 	struct clk *clks[MTK_CLK_MAX];
 
 	struct mii_bus *mii_bus;
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
 	do {
 		if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
 			break;
+		if (pci_channel_offline(dev->pdev)) {
+			mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+			return -EACCES;
+		}
 
 		cond_resched();
 	} while (!time_after(jiffies, end));
@@ -248,6 +248,10 @@ recover_from_sw_reset:
 	do {
 		if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
 			break;
+		if (pci_channel_offline(dev->pdev)) {
+			mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+			goto unlock;
+		}
 
 		msleep(20);
 	} while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
 			mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
 			return -ENODEV;
 		}
+		if (pci_channel_offline(dev->pdev)) {
+			mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+			return -EACCES;
+		}
 		msleep(100);
 	}
 	return 0;
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
 						  &dest, 1);
 		if (IS_ERR(lag_definer->rules[idx])) {
 			err = PTR_ERR(lag_definer->rules[idx]);
-			while (i--)
-				while (j--)
+			do {
+				while (j--) {
+					idx = i * ldev->buckets + j;
 					mlx5_del_flow_rules(lag_definer->rules[idx]);
+				}
+				j = ldev->buckets;
+			} while (i--);
 			goto destroy_fg;
 		}
 	}
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
 			ret = -EBUSY;
 			goto pci_unlock;
 		}
+		if (pci_channel_offline(dev->pdev)) {
+			ret = -EACCES;
+			goto pci_unlock;
+		}
 
 		/* Check if semaphore is already locked */
 		ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
 
 	if (!err)
 		mlx5_function_disable(dev, boot);
+	else
+		mlx5_stop_health_poll(dev, boot);
+
 	return err;
 }
@@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
 			goto out_xdp_abort;
 		}
+		buf_info->page = NULL;
 		stats->xdp_tx++;
 
 		/* the Tx completion will free the buffers */
Some files were not shown because too many files have changed in this diff.