Mirror of https://github.com/armbian/linux-cix.git (synced 2026-01-06 12:30:45 -08:00)
Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net

Bug fixes overlapping feature additions and refactoring, mostly.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -12910,7 +12910,7 @@ F:	net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
 M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
-L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+L:	linux-nfc@lists.01.org (subscribers-only)
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/nfc/
@@ -12923,7 +12923,7 @@ F:	net/nfc/
 NFC VIRTUAL NCI DEVICE DRIVER
 M:	Bongsu Jeon <bongsu.jeon@samsung.com>
 L:	netdev@vger.kernel.org
-L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+L:	linux-nfc@lists.01.org (subscribers-only)
 S:	Supported
 F:	drivers/nfc/virtual_ncidev.c
 F:	tools/testing/selftests/nci/
@@ -13229,7 +13229,7 @@ F:	sound/soc/codecs/tfa9879*
 
 NXP-NCI NFC DRIVER
 R:	Charles Gorand <charles.gorand@effinnov.com>
-L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+L:	linux-nfc@lists.01.org (subscribers-only)
 S:	Supported
 F:	drivers/nfc/nxp-nci
 
@@ -16156,7 +16156,7 @@ F:	include/media/drv-intf/s3c_camif.h
 SAMSUNG S3FWRN5 NFC DRIVER
 M:	Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
 M:	Krzysztof Opasiak <k.opasiak@samsung.com>
-L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+L:	linux-nfc@lists.01.org (subscribers-only)
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F:	drivers/nfc/s3fwrn5
@@ -18347,7 +18347,7 @@ F:	sound/soc/codecs/tas571x*
 TI TRF7970A NFC DRIVER
 M:	Mark Greer <mgreer@animalcreek.com>
 L:	linux-wireless@vger.kernel.org
-L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+L:	linux-nfc@lists.01.org (subscribers-only)
 S:	Supported
 F:	Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:	drivers/nfc/trf7970a.c
@@ -2527,10 +2527,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
 	}
 
 	btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
-	err = request_firmware(&fw, fwname, &hdev->dev);
+	err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
 	if (err < 0) {
+		if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+			/* Firmware has already been loaded */
+			set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+			return 0;
+		}
+
 		bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
 			   fwname, err);
+
 		return err;
 	}
 
@@ -2680,12 +2687,24 @@ download:
 	err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
 						sizeof(fwname), "sfi");
 	if (err < 0) {
+		if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+			/* Firmware has already been loaded */
+			set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+			return 0;
+		}
+
 		bt_dev_err(hdev, "Unsupported Intel firmware naming");
 		return -EINVAL;
 	}
 
-	err = request_firmware(&fw, fwname, &hdev->dev);
+	err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
 	if (err < 0) {
+		if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+			/* Firmware has already been loaded */
+			set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+			return 0;
+		}
+
 		bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
 			   fwname, err);
 		return err;
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
 		       bool persistent, u8 *smt_idx);
 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
 void cxgb4_quiesce_rx(struct sge_rspq *q);
 int cxgb4_port_mirror_alloc(struct net_device *dev);
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
 /*
  * net_device operations
  */
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
 	return err;
 }
 
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
 	if (!ch_flower)
 		return -ENOENT;
 
+	rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+			       adap->flower_ht_params);
+
 	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
 				      &ch_flower->fs, ch_flower->filter_id);
 	if (ret)
-		goto err;
+		netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+			   ch_flower->filter_id, ret);
 
-	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
-				     adap->flower_ht_params);
-	if (ret) {
-		netdev_err(dev, "Flow remove from rhashtable failed");
-		goto err;
-	}
 	kfree_rcu(ch_flower, rcu);
 
-err:
 	return ret;
 }
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
 	 * down before configuring tc params.
 	 */
 	if (netif_running(dev)) {
-		cxgb_close(dev);
+		netif_tx_stop_all_queues(dev);
+		netif_carrier_off(dev);
 		needs_bring_up = true;
 	}
 
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
 	}
 
 out:
-	if (needs_bring_up)
-		cxgb_open(dev);
+	if (needs_bring_up) {
+		netif_tx_start_all_queues(dev);
+		netif_carrier_on(dev);
+	}
 
 	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
 	return ret;
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
 	if (!eosw_txq)
 		return -ENOMEM;
 
+	if (!(adap->flags & CXGB4_FW_OK)) {
+		/* Don't stall caller when access to FW is lost */
+		complete(&eosw_txq->completion);
+		return -EIO;
+	}
+
 	skb = alloc_skb(len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+		if (result == I40E_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
+		result = I40E_XDP_REDIR;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough; /* handle aborts by dropping packet */
 	case XDP_DROP:
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 
 	if (likely(act == XDP_REDIRECT)) {
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
 		rcu_read_unlock();
-		return result;
+		return I40E_XDP_REDIR;
 	}
 
 	switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+		if (result == I40E_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough; /* handle aborts by dropping packet */
 	case XDP_DROP:
@@ -341,6 +341,7 @@ struct ice_vsi {
 	struct ice_tc_cfg tc_cfg;
 	struct bpf_prog *xdp_prog;
 	struct ice_ring **xdp_rings;	 /* XDP ring array */
+	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -559,15 +560,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 {
+	struct ice_vsi *vsi = ring->vsi;
 	u16 qid = ring->q_index;
 
 	if (ice_ring_is_xdp(ring))
-		qid -= ring->vsi->num_xdp_txq;
+		qid -= vsi->num_xdp_txq;
 
-	if (!ice_is_xdp_ena_vsi(ring->vsi))
+	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
 		return NULL;
 
-	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+	return xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 		ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
 						100000baseKR4_Full);
 	}
-
-	/* Autoneg PHY types */
-	if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     Autoneg);
-	}
-	if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     Autoneg);
-	}
-	if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
-	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     Autoneg);
-	}
 }
 
 #define TEST_SET_BITS_TIMEOUT	50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
 		ks->base.port = PORT_TP;
 		break;
 	case ICE_MEDIA_BACKPLANE:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
 		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
 		ethtool_link_ksettings_add_link_mode(ks, advertising,
 						     Backplane);
 		ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
 		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
 
+	/* Set supported and advertised autoneg */
+	if (ice_is_phy_caps_an_enabled(caps)) {
+		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+	}
+
 done:
 	kfree(caps);
 	return err;
@@ -31,6 +31,7 @@
 #define PF_FW_ATQLEN_ATQOVFL_M		BIT(29)
 #define PF_FW_ATQLEN_ATQCRIT_M		BIT(30)
 #define VF_MBX_ARQLEN(_VF)		(0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF)		(0x0022A800 + ((_VF) * 4))
 #define PF_FW_ATQLEN_ATQENABLE_M	BIT(31)
 #define PF_FW_ATQT			0x00080400
 #define PF_MBX_ARQBAH			0x0022E400
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	if (!vsi->q_vectors)
 		goto err_vectors;
 
+	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+	if (!vsi->af_xdp_zc_qps)
+		goto err_zc_qps;
+
 	return 0;
 
+err_zc_qps:
+	devm_kfree(dev, vsi->q_vectors);
 err_vectors:
 	devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -194,6 +200,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 		break;
 	case ICE_VSI_VF:
 		vf = &pf->vf[vsi->vf_id];
+		if (vf->num_req_qs)
+			vf->num_vf_qs = vf->num_req_qs;
 		vsi->alloc_txq = vf->num_vf_qs;
 		vsi->alloc_rxq = vf->num_vf_qs;
 		/* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +296,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
 	dev = ice_pf_to_dev(pf);
 
+	if (vsi->af_xdp_zc_qps) {
+		bitmap_free(vsi->af_xdp_zc_qps);
+		vsi->af_xdp_zc_qps = NULL;
+	}
 	/* free the ring and vector containers */
 	if (vsi->q_vectors) {
 		devm_kfree(dev, vsi->q_vectors);
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
 	    struct bpf_prog *xdp_prog)
 {
 	struct ice_ring *xdp_ring;
-	int err;
+	int err, result;
 	u32 act;
 
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
 		return ICE_XDP_PASS;
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
-		return ice_xmit_xdp_buff(xdp, xdp_ring);
+		result = ice_xmit_xdp_buff(xdp, xdp_ring);
+		if (result == ICE_XDP_CONSUMED)
+			goto out_failure;
+		return result;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
+		return ICE_XDP_REDIR;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
@@ -2143,6 +2149,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 	struct ice_tx_offload_params offload = { 0 };
 	struct ice_vsi *vsi = tx_ring->vsi;
 	struct ice_tx_buf *first;
+	struct ethhdr *eth;
 	unsigned int count;
 	int tso, csum;
 
@@ -2189,7 +2196,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 		goto out_drop;
 
 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
-	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+	eth = (struct ethhdr *)skb_mac_header(skb);
+	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+		      eth->h_proto == htons(ETH_P_LLDP)) &&
 		     vsi->type == ICE_VSI_PF &&
 		     vsi->port_info->qos_cfg.is_sw_lldp))
 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 	 */
 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 
-	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
-	 * in the case of VFR. If this is done for PFR, it can mess up VF
-	 * resets because the VF driver may already have started cleanup
-	 * by the time we get here.
+	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+	 * needs to clear them in the case of VFR/VFLR. If this is done for
+	 * PFR, it can mess up VF resets because the VF driver may already
+	 * have started cleanup by the time we get here.
 	 */
-	if (!is_pfr)
+	if (!is_pfr) {
 		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+	}
 
 	/* In the case of a VFLR, the HW has already reset the VF and we
 	 * just need to clean up, so don't hit the VFRTRIG register.
@@ -1698,7 +1700,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 	ice_vf_ctrl_vsi_release(vf);
 
 	ice_vf_pre_vsi_rebuild(vf);
-	ice_vf_rebuild_vsi_with_release(vf);
+
+	if (ice_vf_rebuild_vsi_with_release(vf)) {
+		dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+		return false;
+	}
+
 	ice_vf_post_vsi_rebuild(vf);
 
 	/* if the VF has been reset allow it to come up again */
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 	if (!pool)
 		return -EINVAL;
 
+	clear_bit(qid, vsi->af_xdp_zc_qps);
 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
 	return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	if (err)
 		return err;
 
+	set_bit(qid, vsi->af_xdp_zc_qps);
+
 	return 0;
 }
 
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
 
 	if (likely(act == XDP_REDIRECT)) {
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
 		rcu_read_unlock();
-		return result;
+		return ICE_XDP_REDIR;
 	}
 
 	switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
 		result = ice_xmit_xdp_buff(xdp, xdp_ring);
+		if (result == ICE_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-			struct sk_buff *skb);
+			ktime_t *timestamp);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
@@ -8281,7 +8281,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 					 struct igb_rx_buffer *rx_buffer,
 					 struct xdp_buff *xdp,
-					 union e1000_adv_rx_desc *rx_desc)
+					 ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8301,12 +8301,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 	if (unlikely(!skb))
 		return NULL;
 
-	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
-			xdp->data += IGB_TS_HDR_LEN;
-			size -= IGB_TS_HDR_LEN;
-		}
-	}
+	if (timestamp)
+		skb_hwtstamps(skb)->hwtstamp = timestamp;
 
 	/* Determine available headroom for copy */
 	headlen = size;
@@ -8337,7 +8333,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 				     struct igb_rx_buffer *rx_buffer,
 				     struct xdp_buff *xdp,
-				     union e1000_adv_rx_desc *rx_desc)
+				     ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8364,11 +8360,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
-	/* pull timestamp out of packet data */
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
-			__skb_pull(skb, IGB_TS_HDR_LEN);
-	}
+	if (timestamp)
+		skb_hwtstamps(skb)->hwtstamp = timestamp;
 
 	/* update buffer offset */
 #if (PAGE_SIZE < 8192)
@@ -8402,18 +8395,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
 		break;
 	case XDP_TX:
 		result = igb_xdp_xmit_back(adapter, xdp);
+		if (result == IGB_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-		if (!err)
-			result = IGB_XDP_REDIR;
-		else
-			result = IGB_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
+		result = IGB_XDP_REDIR;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
@@ -8683,7 +8678,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	while (likely(total_packets < budget)) {
 		union e1000_adv_rx_desc *rx_desc;
 		struct igb_rx_buffer *rx_buffer;
+		ktime_t timestamp = 0;
+		int pkt_offset = 0;
 		unsigned int size;
+		void *pktbuf;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8703,14 +8701,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		dma_rmb();
 
 		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+		/* pull rx packet timestamp if available and valid */
+		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+			int ts_hdr_len;
+
+			ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+							 pktbuf, &timestamp);
+
+			pkt_offset += ts_hdr_len;
+			size -= ts_hdr_len;
+		}
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			unsigned int offset = igb_rx_offset(rx_ring);
-			unsigned char *hard_start;
+			unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+			unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
 
-			hard_start = page_address(rx_buffer->page) +
-				     rx_buffer->page_offset - offset;
 			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8733,10 +8741,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		} else if (skb)
 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
 		else if (ring_uses_build_skb(rx_ring))
-			skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+			skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+					    timestamp);
 		else
 			skb = igb_construct_skb(rx_ring, rx_buffer,
-						&xdp, rx_desc);
+						&xdp, timestamp);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 	dev_kfree_skb_any(skb);
 }
 
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame. The value is stored in little endian format starting on
  * byte 8
 *
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
 **/
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-			struct sk_buff *skb)
+			ktime_t *timestamp)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct skb_shared_hwtstamps ts;
 	__le64 *regval = (__le64 *)va;
 	int adjust = 0;
 
 	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
-		return IGB_RET_PTP_DISABLED;
+		return 0;
 
 	/* The timestamp is recorded in little endian format.
 	 * DWORD: 0 1 2 3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 
 	/* check reserved dwords are zero, be/le doesn't matter for zero */
 	if (regval[0])
-		return IGB_RET_PTP_INVALID;
+		return 0;
 
-	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-				   le64_to_cpu(regval[1]));
+	igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
 
 	/* adjust timestamp for the RX latency based on link speed */
 	if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 			break;
 		}
 	}
-	skb_hwtstamps(skb)->hwtstamp =
-		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
-	return 0;
+	*timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+	return IGB_TS_HDR_LEN;
 }
 
 /**
@@ -2214,15 +2214,19 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter,
 	case XDP_PASS:
 		return IGC_XDP_PASS;
 	case XDP_TX:
-		return igc_xdp_xmit_back(adapter, xdp) < 0 ?
-			IGC_XDP_CONSUMED : IGC_XDP_TX;
+		if (igc_xdp_xmit_back(adapter, xdp) < 0)
+			goto out_failure;
+		return IGC_XDP_TX;
 	case XDP_REDIRECT:
-		return xdp_do_redirect(adapter->netdev, xdp, prog) < 0 ?
-			IGC_XDP_CONSUMED : IGC_XDP_REDIRECT;
+		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+			goto out_failure;
+		return IGC_XDP_REDIRECT;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(adapter->netdev, prog, act);
 		fallthrough;
 	case XDP_DROP:
Some files were not shown because too many files have changed in this diff.