Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
include/net/sock.h
  commit 8f905c0e73 ("inet: fully convert sk->sk_rx_dst to RCU rules")
  commit 43f51df417 ("net: move early demux fields close to sk_refcnt")
  https://lore.kernel.org/all/20211222141641.0caa0ab3@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Documentation/networking/bonding.rst
@@ -196,11 +196,12 @@ ad_actor_sys_prio
 ad_actor_system

	In an AD system, this specifies the mac-address for the actor in
-	protocol packet exchanges (LACPDUs). The value cannot be NULL or
-	multicast. It is preferred to have the local-admin bit set for this
-	mac but driver does not enforce it. If the value is not given then
-	system defaults to using the masters' mac address as actors' system
-	address.
+	protocol packet exchanges (LACPDUs). The value cannot be a multicast
+	address. If the all-zeroes MAC is specified, bonding will internally
+	use the MAC of the bond itself. It is preferred to have the
+	local-admin bit set for this mac but driver does not enforce it. If
+	the value is not given then system defaults to using the masters'
+	mac address as actors' system address.

	This parameter has effect only in 802.3ad mode and is available through
	SysFs interface.
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
@@ -183,6 +183,7 @@ PHY and allows physical transmission and reception of Ethernet frames.
	IRQ config, enable, reset

 DPNI (Datapath Network Interface)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Contains TX/RX queues, network interface configuration, and RX buffer pool
 configuration mechanisms. The TX/RX queues are in memory and are identified
 by queue number.
Documentation/networking/timestamping.rst
@@ -582,8 +582,8 @@ Time stamps for outgoing packets are to be generated as follows:
   and hardware timestamping is not possible (SKBTX_IN_PROGRESS not set).
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
-  calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  calling skb_tstamp_tx() with the original skb, the raw
+  hardware time stamp. skb_tstamp_tx() clones the original skb and
   adds the timestamps, therefore the original skb has to be freed now.
   If obtaining the hardware time stamp somehow fails, then the driver
   should not fall back to software time stamping. The rationale is that
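
A minimal sketch of the flow the corrected documentation describes, with an illustrative helper name that is not taken from any real driver; it assumes the raw counter value has already been converted to nanoseconds:

#include <linux/skbuff.h>
#include <linux/ktime.h>

/* Hypothetical TX-completion helper (names are illustrative). */
static void example_tx_hwtstamp_complete(struct sk_buff *skb, u64 raw_ns)
{
	struct skb_shared_hwtstamps shhwtstamps = { };

	shhwtstamps.hwtstamp = ns_to_ktime(raw_ns);
	/* skb_tstamp_tx() clones the skb and attaches the time stamp... */
	skb_tstamp_tx(skb, &shhwtstamps);
	/* ...so the original skb must be freed here, as the text says. */
	dev_kfree_skb_any(skb);
}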
drivers/net/bonding/bond_options.c
@@ -1554,7 +1554,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
		mac = (u8 *)&newval->value;
	}

-	if (!is_valid_ether_addr(mac))
+	if (is_multicast_ether_addr(mac))
		goto err;

	netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
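
The doc and code hunks above fit together: is_valid_ether_addr() rejects both multicast and all-zeroes addresses, so switching to is_multicast_ether_addr() is what lets an all-zeroes value through to mean "use the bond's own MAC". A small userspace re-implementation of the two predicates to show the difference (illustrative only; the kernel versions live in <linux/etherdevice.h>):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_multicast(const unsigned char *mac) { return mac[0] & 0x01; }

static bool is_zero(const unsigned char *mac)
{
	static const unsigned char z[6];
	return !memcmp(mac, z, 6);
}

static bool is_valid(const unsigned char *mac)
{
	return !is_multicast(mac) && !is_zero(mac);
}

int main(void)
{
	const unsigned char zero[6] = { 0 };
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* Old gate: !is_valid() rejects multicast AND all-zeroes. */
	printf("old rejects zero: %d, mcast: %d\n", !is_valid(zero), !is_valid(mcast));
	/* New gate: only multicast is refused; all-zeroes now falls
	 * back to the bond's own MAC, per the documentation change. */
	printf("new rejects zero: %d, mcast: %d\n", is_multicast(zero), is_multicast(mcast));
	return 0;
}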
drivers/net/ethernet/intel/ice/ice_base.c
@@ -6,6 +6,18 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"

+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+	rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+	return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+	rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+	return !!rx_ring->rx_buf;
+}
+
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
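
These helpers back the Rx ring with one of two separately allocated arrays, picked by whether an AF_XDP (zero-copy) pool is attached; the kcalloc(count, sizeof(*ptr), ...) form also guards the size multiplication against overflow. A simplified userspace sketch of the pattern (stand-in types, not the driver's real structures):

#include <stdbool.h>
#include <stdlib.h>

struct demo_ring {
	unsigned int count;
	void **xdp_buf;  /* zero-copy descriptors, used when a pool is attached */
	void **rx_buf;   /* regular buffer metadata otherwise */
};

static bool demo_alloc(struct demo_ring *r, bool zero_copy)
{
	/* Only one of the two arrays exists at a time, so the free path
	 * can pick the right pointer with the same zero_copy test. */
	if (zero_copy) {
		r->xdp_buf = calloc(r->count, sizeof(*r->xdp_buf));
		return r->xdp_buf != NULL;
	}
	r->rx_buf = calloc(r->count, sizeof(*r->rx_buf));
	return r->rx_buf != NULL;
}

int main(void)
{
	struct demo_ring r = { .count = 512 };

	return demo_alloc(&r, true) ? 0 : 1;
}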
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index, ring->q_vector->napi.napi_id);

+		kfree(ring->rx_buf);
		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
+			if (!ice_alloc_rx_buf_zc(ring))
+				return -ENOMEM;
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
+			if (!ice_alloc_rx_buf(ring))
+				return -ENOMEM;
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -427,7 +427,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
	}

 rx_skip_free:
-	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+	if (rx_ring->xsk_pool)
+		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+	else
+		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -454,8 +457,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
-	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
-	rx_ring->rx_buf = NULL;
+	if (rx_ring->xsk_pool) {
+		kfree(rx_ring->xdp_buf);
+		rx_ring->xdp_buf = NULL;
+	} else {
+		kfree(rx_ring->rx_buf);
+		rx_ring->rx_buf = NULL;
+	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -483,8 +491,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
-		devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
-			     GFP_KERNEL);
+		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;
@@ -513,7 +520,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
	return 0;

 err:
-	devm_kfree(dev, rx_ring->rx_buf);
+	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
 }
drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -24,7 +24,6 @@
 #define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

-#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG	128

 /* Attempt to maximize the headroom available for incoming frames. We use a 2K
drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -12,6 +12,11 @@
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"

+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+	return &rx_ring->xdp_buf[idx];
+}
+
 /**
  * ice_qp_reset_stats - Resets all stats for rings of given index
  * @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
	dma_addr_t dma;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
-	xdp = &rx_ring->xdp_buf[ntu];
+	xdp = ice_xdp_buf(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
	}

	ntu += nb_buffs;
-	if (ntu == rx_ring->count) {
-		rx_desc = ICE_RX_DESC(rx_ring, 0);
-		xdp = rx_ring->xdp_buf;
+	if (ntu == rx_ring->count)
		ntu = 0;
-	}

-	/* clear the status bits for the next_to_use descriptor */
-	rx_desc->wb.status_error0 = 0;
	ice_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 {
-	struct xdp_buff *xdp = *xdp_arr;
+	unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
-	unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
	skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
-	*xdp_arr = NULL;
	return skb;
 }
@@ -507,7 +505,6 @@ out_failure:
 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 {
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
-		struct xdp_buff **xdp;
+		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
		 */
		dma_rmb();

+		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;
-		if (!size)
-			break;
+		if (!size) {
+			xdp->data = NULL;
+			xdp->data_end = NULL;
+			xdp->data_hard_start = NULL;
+			xdp->data_meta = NULL;
+			goto construct_skb;
+		}

-		xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
-		xsk_buff_set_size(*xdp, size);
-		xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+		xsk_buff_set_size(xdp, size);
+		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

-		xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
		if (xdp_res) {
			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
-				xsk_buff_free(*xdp);
+				xsk_buff_free(xdp);

-			*xdp = NULL;
			total_rx_bytes += size;
			total_rx_packets++;
-			cleaned_count++;

			ice_bump_ntc(rx_ring);
			continue;
		}

+construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, xdp);
		if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
			break;
		}

-		cleaned_count++;
		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

-	if (cleaned_count >= ICE_RX_BUF_WRITE)
-		failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+	failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-	u16 i;
+	u16 count_mask = rx_ring->count - 1;
+	u16 ntc = rx_ring->next_to_clean;
+	u16 ntu = rx_ring->next_to_use;

-	for (i = 0; i < rx_ring->count; i++) {
-		struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

-		if (!xdp)
-			continue;
-
-		*xdp = NULL;
+		xsk_buff_free(xdp);
	}
 }
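
The rewritten loop only walks the next_to_clean..next_to_use span and wraps indices with & (count - 1), which is valid because these ring sizes are powers of two. A self-contained demonstration of the wrap arithmetic (my numbers, not driver state):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int count = 8;      /* must be a power of two */
	const unsigned int mask = count - 1;
	unsigned int ntc = 6, ntu = 2;     /* span wraps past the ring end */
	unsigned int visited = 0;

	for (; ntc != ntu; ntc = (ntc + 1) & mask) {
		printf("cleaning slot %u\n", ntc); /* prints 6, 7, 0, 1 */
		visited++;
	}
	assert(visited == 4);
	return 0;
}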
drivers/net/ethernet/intel/igb/igb_main.c
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }

-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
 {
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)

	wr32(E1000_WUS, ~0);

-	rtnl_lock();
+	if (!rpm)
+		rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igb_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
-	rtnl_unlock();
+	if (!rpm)
+		rtnl_unlock();

	return err;
 }

+static int __maybe_unused igb_resume(struct device *dev)
+{
+	return __igb_resume(dev, false);
+}
+
 static int __maybe_unused igb_runtime_idle(struct device *dev)
 {
	struct net_device *netdev = dev_get_drvdata(dev);
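
For context on the rpm flag: runtime resume can be entered while RTNL is already held further up the call chain, so retaking the lock there would self-deadlock, while system resume still needs to take it. A generic userspace sketch of that conditional-locking shape, with a pthread mutex standing in for RTNL (not igb's literal code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for RTNL */

/* Shared resume body; caller_holds_lock says whether the caller already
 * owns big_lock, as the runtime-PM path does in the fixed deadlock. */
static int device_resume(bool caller_holds_lock)
{
	if (!caller_holds_lock)
		pthread_mutex_lock(&big_lock);
	puts("reconfiguring device under the lock");
	if (!caller_holds_lock)
		pthread_mutex_unlock(&big_lock);
	return 0;
}

int main(void)
{
	device_resume(false);          /* system resume: takes the lock itself */

	pthread_mutex_lock(&big_lock); /* runtime resume arrives with it held */
	device_resume(true);
	pthread_mutex_unlock(&big_lock);
	return 0;
}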
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)

 static int __maybe_unused igb_runtime_resume(struct device *dev)
 {
-	return igb_resume(dev);
+	return __igb_resume(dev, true);
 }

 static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
  * @pdev: Pointer to PCI device
  *
  * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
  **/
 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 {
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
  *
  * This callback is called when the error recovery driver tells us that
  * its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
  */
 static void igb_io_resume(struct pci_dev *pdev)
 {
drivers/net/ethernet/lantiq_xrx200.c
@@ -71,6 +71,8 @@ struct xrx200_priv {
	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

+	u16 rx_buf_size;
+
	struct net_device *net_dev;
	struct device *dev;
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
	xrx200_pmac_w32(priv, val, offset);
 }

+static int xrx200_max_frame_len(int mtu)
+{
+	return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
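
What xrx200_buffer_size() computes, re-implemented in userspace for illustration; the constants are assumptions here (VLAN_ETH_HLEN is 18, and XRX200_DMA_BURST_LEN is taken as 8, i.e. buffers padded to 32-byte DMA bursts):

#include <stdio.h>

#define VLAN_ETH_HLEN 18         /* Ethernet header + one VLAN tag */
#define XRX200_DMA_BURST_LEN 8   /* assumed value; check the driver */

static int round_up_to(int x, int align)
{
	return (x + align - 1) / align * align;
}

static int max_frame_len(int mtu) { return VLAN_ETH_HLEN + mtu; }

static int buffer_size(int mtu)
{
	return round_up_to(max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

int main(void)
{
	/* 1500 + 18 = 1518, rounded up to a multiple of 32 -> 1536 */
	printf("buffer_size(1500) = %d\n", buffer_size(1500));
	return 0;
}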
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
-			    (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
-			     ETH_FCS_LEN);
+			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)

 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
-	int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
	struct sk_buff *skb = ch->skb[ch->dma.desc];
+	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

-	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
-							  len);
+	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+							  priv->rx_buf_size);
	if (!ch->skb[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

-	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
-				 len, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+	mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+				 priv->rx_buf_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ch->skb[ch->dma.desc] = skb;
		ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
	wmb();
 skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
-		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
 }
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
	int ret = 0;

	net_dev->mtu = new_mtu;
+	priv->rx_buf_size = xrx200_buffer_size(new_mtu);

	if (new_mtu <= old_mtu)
		return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
		ret = xrx200_alloc_skb(ch_rx);
		if (ret) {
			net_dev->mtu = old_mtu;
+			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			break;
		}
		dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
-	net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -55,12 +55,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
 struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
						 u32 dev_id, u32 hw_id)
 {
-	struct prestera_port *port = NULL;
+	struct prestera_port *port = NULL, *tmp;

	read_lock(&sw->port_list_lock);
-	list_for_each_entry(port, &sw->port_list, list) {
-		if (port->dev_id == dev_id && port->hw_id == hw_id)
+	list_for_each_entry(tmp, &sw->port_list, list) {
+		if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+			port = tmp;
			break;
+		}
	}
	read_unlock(&sw->port_list_lock);
@@ -69,12 +71,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,

 struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
 {
-	struct prestera_port *port = NULL;
+	struct prestera_port *port = NULL, *tmp;

	read_lock(&sw->port_list_lock);
-	list_for_each_entry(port, &sw->port_list, list) {
-		if (port->id == id)
+	list_for_each_entry(tmp, &sw->port_list, list) {
+		if (tmp->id == id) {
+			port = tmp;
			break;
+		}
	}
	read_unlock(&sw->port_list_lock);
@@ -765,23 +769,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
				      struct net_device *dev,
				      unsigned long event, void *ptr)
 {
-	struct netdev_notifier_changeupper_info *info = ptr;
+	struct netdev_notifier_info *info = ptr;
+	struct netdev_notifier_changeupper_info *cu_info;
	struct prestera_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	struct net_device *upper;

-	extack = netdev_notifier_info_to_extack(&info->info);
-	upper = info->upper_dev;
+	extack = netdev_notifier_info_to_extack(info);
+	cu_info = container_of(info,
+			       struct netdev_notifier_changeupper_info,
+			       info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
+		upper = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper) &&
		    !netif_is_lag_master(upper)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}

-		if (!info->linking)
+		if (!cu_info->linking)
			break;

		if (netdev_has_any_upper_dev(upper)) {
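
The corrected code is the standard notifier pattern: the void *ptr is really the embedded struct netdev_notifier_info, and the changeupper wrapper is only recovered via container_of() for events that actually carry one. A self-contained illustration of the container_of() arithmetic with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct info { int event; };       /* stand-in for netdev_notifier_info */
struct changeupper_info {         /* stand-in for the changeupper wrapper */
	struct info info;         /* embedded base member */
	int linking;
};

int main(void)
{
	struct changeupper_info cu = { .info = { .event = 1 }, .linking = 1 };
	struct info *ptr = &cu.info; /* what the notifier hands you */

	struct changeupper_info *back =
		container_of(ptr, struct changeupper_info, info);
	printf("linking = %d\n", back->linking); /* 1: wrapper recovered */
	return 0;
}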
@@ -790,7 +798,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
		}

		if (netif_is_lag_master(upper) &&
-		    !prestera_lag_master_check(upper, info->upper_info, extack))
+		    !prestera_lag_master_check(upper, cu_info->upper_info, extack))
			return -EOPNOTSUPP;
		if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack,
@@ -806,14 +814,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
		break;

	case NETDEV_CHANGEUPPER:
+		upper = cu_info->upper_dev;
		if (netif_is_bridge_master(upper)) {
-			if (info->linking)
+			if (cu_info->linking)
				return prestera_bridge_port_join(upper, port,
								 extack);
			else
				prestera_bridge_port_leave(upper, port);
		} else if (netif_is_lag_master(upper)) {
-			if (info->linking)
+			if (cu_info->linking)
				return prestera_lag_port_add(port, upper);
			else
				prestera_lag_port_del(port);
drivers/net/ethernet/micrel/ks8851_par.c
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
		return ret;

	netdev->irq = platform_get_irq(pdev, 0);
+	if (netdev->irq < 0)
+		return netdev->irq;

	return ks8851_probe_common(netdev, dev, msg_enable);
 }
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
				   struct qlcnic_info *, u16);
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
			      struct qlcnic_vf_info *, u16);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
				    struct qlcnic_cmd_args *cmd)
 {
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-	int i, num_vlans;
+	int i, num_vlans, ret;
	u16 *vlans;

	if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
	dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
		 sriov->num_allowed_vlans);

-	qlcnic_sriov_alloc_vlans(adapter);
+	ret = qlcnic_sriov_alloc_vlans(adapter);
+	if (ret)
+		return ret;

	if (!sriov->any_vlan)
		return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
	return err;
 }

-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
 {
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
		vf = &sriov->vf_info[i];
		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
+		if (!vf->sriov_vlans)
+			return -ENOMEM;
	}
+
+	return 0;
 }

 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
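
The conversion above makes the allocator report failure instead of leaving NULL entries behind for a later dereference; callers unwind through their existing error labels, as the next hunk shows. A hedged userspace sketch of the new shape with simplified stand-in types:

#include <errno.h>
#include <stdlib.h>

struct vf { int *vlans; };

/* Returns 0 or a negative error, like the patched
 * qlcnic_sriov_alloc_vlans(); the caller is expected to
 * free any partially allocated entries on failure. */
static int alloc_vlans(struct vf *vfs, int nvfs, int nvlans)
{
	for (int i = 0; i < nvfs; i++) {
		vfs[i].vlans = calloc(nvlans, sizeof(*vfs[i].vlans));
		if (!vfs[i].vlans)
			return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct vf vfs[4] = { 0 };

	return alloc_vlans(vfs, 4, 8) ? 1 : 0;
}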
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
	if (err)
		goto del_flr_queue;

-	qlcnic_sriov_alloc_vlans(adapter);
+	err = qlcnic_sriov_alloc_vlans(adapter);
+	if (err)
+		goto del_flr_queue;

	return err;
drivers/net/ethernet/sfc/falcon/rx.c
@@ -728,7 +728,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
-	rx_queue->page_ptr_mask = page_ring_size - 1;
+	if (!rx_queue->page_ring)
+		rx_queue->page_ptr_mask = 0;
+	else
+		rx_queue->page_ptr_mask = page_ring_size - 1;
 }

 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
drivers/net/ethernet/sfc/rx_common.c
@@ -150,7 +150,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
-	rx_queue->page_ptr_mask = page_ring_size - 1;
+	if (!rx_queue->page_ring)
+		rx_queue->page_ptr_mask = 0;
+	else
+		rx_queue->page_ptr_mask = page_ring_size - 1;
 }

 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
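
Both sfc hunks apply the same convention: when the recycle ring cannot be allocated, the mask is set to 0 so it stays consistent with "no ring" rather than advertising page_ring_size valid slots; code touching the ring is then expected to tolerate the NULL pointer. A generic sketch of that convention (illustrative, not the driver's code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cache { void **ring; uint32_t mask; };

static void cache_init(struct cache *c, uint32_t size /* power of two */)
{
	c->ring = calloc(size, sizeof(*c->ring));
	/* On failure, keep the mask consistent with "no ring" instead
	 * of leaving a mask that implies size valid entries. */
	c->mask = c->ring ? size - 1 : 0;
}

static void *cache_get(struct cache *c, uint32_t idx)
{
	if (!c->ring) /* users must still tolerate a missing ring */
		return NULL;
	return c->ring[idx & c->mask];
}

int main(void)
{
	struct cache c;

	cache_init(&c, 16);
	printf("slot: %p\n", cache_get(&c, 21)); /* 21 & 15 == 5 */
	return 0;
}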
drivers/net/ethernet/smsc/smc911x.c
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)

	ndev->dma = (unsigned char)-1;
	ndev->irq = platform_get_irq(pdev, 0);
+	if (ndev->irq < 0) {
+		ret = ndev->irq;
+		goto release_both;
+	}
+
	lp = netdev_priv(ndev);
	lp->netdev = ndev;
 #ifdef SMC_DYNAMIC_BUS_CONFIG
drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -26,7 +26,7 @@
 #define ETHER_CLK_SEL_FREQ_SEL_125M	(BIT(9) | BIT(8))
 #define ETHER_CLK_SEL_FREQ_SEL_50M	BIT(9)
 #define ETHER_CLK_SEL_FREQ_SEL_25M	BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M	BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M	0
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN	BIT(0)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC	BIT(10)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV	BIT(11)
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
	time.tv_nsec = priv->plat->est->btr_reserve[0];
	time.tv_sec = priv->plat->est->btr_reserve[1];
	basetime = timespec64_to_ktime(time);
-	cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+	cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
		     priv->plat->est->ctr[0];
	time = stmmac_calc_tas_basetime(basetime,
					current_time_ns,
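
The one-character cast matters on platforms where the multiplication is done in 32 bits: ctr[1] holds seconds, and seconds * NSEC_PER_SEC wraps long before the result is widened for the addition. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctr1 = 5;                 /* seconds part of the cycle time */
	uint32_t nsec_per_sec = 1000000000;

	/* 32-bit multiply wraps: 5e9 mod 2^32 = 705032704 */
	uint64_t wrong = ctr1 * nsec_per_sec;
	/* Widening one operand first keeps the full product. */
	uint64_t right = (uint64_t)ctr1 * nsec_per_sec;

	printf("wrong = %llu\nright = %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}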