Merge tag 'net-6.11-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Including fixes from can, bluetooth and wireless.
No known regressions at this point. Another calm week, but chances are
that has more to do with vacation season than the quality of our work.
Current release - new code bugs:
- smc: prevent NULL pointer dereference in txopt_get
- eth: ti: am65-cpsw: a number of XDP-related fixes
Previous releases - regressions:
- Revert "Bluetooth: MGMT/SMP: Fix address type when using SMP over
BREDR/LE", it breaks existing user space
- Bluetooth: qca: if memdump doesn't work, re-enable IBS to avoid
later problems with suspend
- can: mcp251x: fix deadlock if an interrupt occurs during
mcp251x_open
- eth: r8152: fix the firmware communication error due to use of bulk
write
- ptp: ocp: fix serial port information export
- eth: igb: fix not clearing TimeSync interrupts for 82580
- Revert "wifi: ath11k: support hibernation", fix suspend on Lenovo
Previous releases - always broken:
- eth: intel: fix crashes and bugs when reconfiguration and resets
happen in parallel
- wifi: ath11k: fix NULL dereference in ath11k_mac_get_eirp_power()
Misc:
- docs: netdev: document guidance on cleanup.h"
* tag 'net-6.11-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (61 commits)
ila: call nf_unregister_net_hooks() sooner
tools/net/ynl: fix cli.py --subscribe feature
MAINTAINERS: fix ptp ocp driver maintainers address
selftests: net: enable bind tests
net: dsa: vsc73xx: fix possible subblocks range of CAPT block
sched: sch_cake: fix bulk flow accounting logic for host fairness
docs: netdev: document guidance on cleanup.h
net: xilinx: axienet: Fix race in axienet_stop
net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN
r8152: fix the firmware doesn't work
fou: Fix null-ptr-deref in GRO.
bareudp: Fix device stats updates.
net: mana: Fix error handling in mana_create_txq/rxq's NAPI cleanup
bpf, net: Fix a potential race in do_sock_getsockopt()
net: dqs: Do not use extern for unused dql_group
sch/netem: fix use after free in netem_dequeue
usbnet: modern method to get random MAC
MAINTAINERS: wifi: cw1200: add net-cw1200.h
ice: do not bring the VSI up, if it was down before the XDP setup
ice: remove ICE_CFG_BUSY locking from AF_XDP code
...
@@ -258,24 +258,29 @@ Description: (RW) When retrieving the PHC with the PTP SYS_OFFSET_EXTENDED
		the estimated point where the FPGA latches the PHC time. This
		value may be changed by writing an unsigned integer.

What:		/sys/class/timecard/ocpN/ttyGNSS
What:		/sys/class/timecard/ocpN/ttyGNSS2
Date:		September 2021
Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
Description:	These optional attributes link to the TTY serial ports
		associated with the GNSS devices.
What:		/sys/class/timecard/ocpN/tty
Date:		August 2024
Contact:	Vadim Fedorenko <vadim.fedorenko@linux.dev>
Description:	(RO) Directory containing the sysfs nodes for TTY attributes

What:		/sys/class/timecard/ocpN/ttyMAC
Date:		September 2021
What:		/sys/class/timecard/ocpN/tty/ttyGNSS
What:		/sys/class/timecard/ocpN/tty/ttyGNSS2
Date:		August 2024
Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
Description:	This optional attribute links to the TTY serial port
		associated with the Miniature Atomic Clock.
Description:	(RO) These optional attributes contain names of the TTY serial
		ports associated with the GNSS devices.

What:		/sys/class/timecard/ocpN/ttyNMEA
Date:		September 2021
What:		/sys/class/timecard/ocpN/tty/ttyMAC
Date:		August 2024
Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
Description:	This optional attribute links to the TTY serial port
		which outputs the PHC time in NMEA ZDA format.
Description:	(RO) This optional attribute contains name of the TTY serial
		port associated with the Miniature Atomic Clock.

What:		/sys/class/timecard/ocpN/tty/ttyNMEA
Date:		August 2024
Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
Description:	(RO) This optional attribute contains name of the TTY serial
		port which outputs the PHC time in NMEA ZDA format.

What:		/sys/class/timecard/ocpN/utc_tai_offset
Date:		September 2021
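
With the August 2024 layout, userspace resolves the serial device by reading the attribute contents instead of following a symlink. A minimal illustrative sketch in C (the instance name "ocp0", the "ttyS5" example, and the error handling are assumptions for the example, not part of the ABI text above):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[32], path[64];
	FILE *f = fopen("/sys/class/timecard/ocp0/tty/ttyGNSS", "r");

	if (!f)
		return 1;
	if (!fgets(name, sizeof(name), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	name[strcspn(name, "\n")] = '\0';	/* sysfs attributes end with a newline */
	snprintf(path, sizeof(path), "/dev/%s", name);	/* e.g. /dev/ttyS5 */
	printf("GNSS serial port: %s\n", path);
	return 0;
}
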
@@ -375,6 +375,22 @@ When working in existing code which uses nonstandard formatting make
your code follow the most recent guidelines, so that eventually all code
in the domain of netdev is in the preferred format.

Using device-managed and cleanup.h constructs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Netdev remains skeptical about promises of all "auto-cleanup" APIs,
including even ``devm_`` helpers, historically. They are not the preferred
style of implementation, merely an acceptable one.

Use of ``guard()`` is discouraged within any function longer than 20 lines;
``scoped_guard()`` is considered more readable. Using normal lock/unlock is
still (weakly) preferred.

Low level cleanup constructs (such as ``__free()``) can be used when building
APIs and helpers, especially scoped iterators. However, direct use of
``__free()`` within networking core and drivers is discouraged.
Similar guidance applies to declaring variables mid-function.
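
To make the preference concrete, here is a hedged sketch of the two acceptable styles (the lock and counter are invented for illustration; ``guard()`` and ``scoped_guard()`` come from ``include/linux/cleanup.h``):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* illustrative only, not a real subsystem lock */
static int example_count;

/* Weakly preferred in netdev: explicit lock/unlock pairing. */
static void example_inc_plain(void)
{
	mutex_lock(&example_lock);
	example_count++;
	mutex_unlock(&example_lock);
}

/* Acceptable: scoped_guard() keeps the extent of the critical section
 * visible, which is why it reads better than a bare guard() in longer
 * functions.
 */
static void example_inc_scoped(void)
{
	scoped_guard(mutex, &example_lock)
		example_count++;
}
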
Resending after review
~~~~~~~~~~~~~~~~~~~~~~
@@ -5956,6 +5956,7 @@ F: Documentation/process/cve.rst
CW1200 WLAN driver
S:	Orphan
F:	drivers/net/wireless/st/cw1200/
F:	include/linux/platform_data/net-cw1200.h

CX18 VIDEO4LINUX DRIVER
M:	Andy Walls <awalls@md.metrocast.net>
@@ -15905,6 +15906,8 @@ F: include/uapi/linux/ethtool_netlink.h
F:	include/uapi/linux/if_*
F:	include/uapi/linux/netdev*
F:	tools/testing/selftests/drivers/net/
X:	Documentation/devicetree/bindings/net/bluetooth/
X:	Documentation/devicetree/bindings/net/wireless/
X:	drivers/net/wireless/

NETWORKING DRIVERS (WIRELESS)

@@ -17130,7 +17133,7 @@ F: include/dt-bindings/

OPENCOMPUTE PTP CLOCK DRIVER
M:	Jonathan Lemon <jonathan.lemon@gmail.com>
M:	Vadim Fedorenko <vadfed@linux.dev>
M:	Vadim Fedorenko <vadim.fedorenko@linux.dev>
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/ptp/ptp_ocp.c
@@ -1091,6 +1091,7 @@ static void qca_controller_memdump(struct work_struct *work)
		qca->memdump_state = QCA_MEMDUMP_COLLECTED;
		cancel_delayed_work(&qca->ctrl_memdump_timeout);
		clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		clear_bit(QCA_IBS_DISABLED, &qca->flags);
		mutex_unlock(&qca->hci_memdump_lock);
		return;
	}
@@ -83,7 +83,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)

	if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
			  sizeof(ipversion))) {
		bareudp->dev->stats.rx_dropped++;
		DEV_STATS_INC(bareudp->dev, rx_dropped);
		goto drop;
	}
	ipversion >>= 4;

@@ -93,7 +93,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
			proto = htons(ETH_P_IPV6);
		} else {
			bareudp->dev->stats.rx_dropped++;
			DEV_STATS_INC(bareudp->dev, rx_dropped);
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {

@@ -107,7 +107,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
		    ipv4_is_multicast(tunnel_hdr->daddr)) {
			proto = htons(ETH_P_MPLS_MC);
		} else {
			bareudp->dev->stats.rx_dropped++;
			DEV_STATS_INC(bareudp->dev, rx_dropped);
			goto drop;
		}
	} else {

@@ -123,7 +123,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
		    (addr_type & IPV6_ADDR_MULTICAST)) {
			proto = htons(ETH_P_MPLS_MC);
		} else {
			bareudp->dev->stats.rx_dropped++;
			DEV_STATS_INC(bareudp->dev, rx_dropped);
			goto drop;
		}
	}

@@ -135,7 +135,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
			   proto,
			   !net_eq(bareudp->net,
				   dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		DEV_STATS_INC(bareudp->dev, rx_dropped);
		goto drop;
	}

@@ -143,7 +143,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)

	tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		DEV_STATS_INC(bareudp->dev, rx_dropped);
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);

@@ -169,8 +169,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
					     &((struct ipv6hdr *)oiph)->saddr);
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			DEV_STATS_INC(bareudp->dev, rx_frame_errors);
			DEV_STATS_INC(bareudp->dev, rx_errors);
			goto drop;
		}
	}

@@ -467,11 +467,11 @@ tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
		DEV_STATS_INC(dev, collisions);
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;
		DEV_STATS_INC(dev, tx_carrier_errors);

	dev->stats.tx_errors++;
	DEV_STATS_INC(dev, tx_errors);
	return NETDEV_TX_OK;
}

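Throughout the bareudp change above, the racy read-modify-write of dev->stats.<field>++ is replaced with DEV_STATS_INC(). A paraphrased sketch of those helpers (based on include/linux/netdevice.h; the exact definition may differ by kernel version) shows why this fixes the stats updates: the core device stats fields are atomic, so concurrent drops on several CPUs cannot lose counts.

/* Paraphrased sketch, not a verbatim copy of the kernel header. */
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
	atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
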
@@ -1686,6 +1686,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
	u32 srb_irq = 0;
	u32 srb_release = 0;
	int i;

	if (!(pci_irq & irq_mask->all))

@@ -1699,17 +1700,14 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		/* Reset DMA buffer 0, may trigger new interrupt */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}
	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;

	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		/* Reset DMA buffer 1, may trigger new interrupt */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}
	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;

	if (srb_release)
		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	return IRQ_HANDLED;
}

@@ -483,11 +483,10 @@ static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_coalescing_disable(cdev);
	m_can_write(cdev, M_CAN_ILE, 0x0);
	cdev->active_interrupts = 0x0;

	if (!cdev->net->irq) {
		dev_dbg(cdev->dev, "Stop hrtimer\n");
		hrtimer_cancel(&cdev->hrtimer);
		hrtimer_try_to_cancel(&cdev->hrtimer);
	}
}

@@ -1037,22 +1036,6 @@ end:
	return work_done;
}

static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done < 0)
		m_can_disable_all_interrupts(cdev);

	return work_done;
}

static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;

@@ -1217,16 +1200,18 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
			      HRTIMER_MODE_REL);
}

static irqreturn_t m_can_isr(int irq, void *dev_id)
/* This interrupt handler is called either from the interrupt thread or a
 * hrtimer. This has implications like cancelling a timer won't be possible
 * blocking.
 */
static int m_can_interrupt_handler(struct m_can_classdev *cdev)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device *dev = cdev->net;
	u32 ir;
	int ret;

	if (pm_runtime_suspended(cdev->dev)) {
		m_can_coalescing_disable(cdev);
	if (pm_runtime_suspended(cdev->dev))
		return IRQ_NONE;
	}

	ir = m_can_read(cdev, M_CAN_IR);
	m_can_coalescing_update(cdev, ir);

@@ -1250,11 +1235,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
			m_can_disable_all_interrupts(cdev);
			napi_schedule(&cdev->napi);
		} else {
			int pkts;

			pkts = m_can_rx_peripheral(dev, ir);
			if (pkts < 0)
				goto out_fail;
			ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
			if (ret < 0)
				return ret;
		}
	}

@@ -1272,8 +1255,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
	} else {
		if (ir & (IR_TEFN | IR_TEFW)) {
			/* New TX FIFO Element arrived */
			if (m_can_echo_tx_event(dev) != 0)
				goto out_fail;
			ret = m_can_echo_tx_event(dev);
			if (ret != 0)
				return ret;
		}
	}

@@ -1281,16 +1265,31 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
		can_rx_offload_threaded_irq_finish(&cdev->offload);

	return IRQ_HANDLED;
}

out_fail:
	m_can_disable_all_interrupts(cdev);
	return IRQ_HANDLED;
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	int ret;

	ret = m_can_interrupt_handler(cdev);
	if (ret < 0) {
		m_can_disable_all_interrupts(cdev);
		return IRQ_HANDLED;
	}

	return ret;
}

static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
{
	struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);

	if (cdev->can.state == CAN_STATE_BUS_OFF ||
	    cdev->can.state == CAN_STATE_STOPPED)
		return HRTIMER_NORESTART;

	irq_wake_thread(cdev->net->irq, cdev->net);

	return HRTIMER_NORESTART;

@@ -1542,6 +1541,7 @@ static int m_can_chip_config(struct net_device *dev)
	else
		interrupts &= ~(IR_ERR_LEC_31X);
	}
	cdev->active_interrupts = 0;
	m_can_interrupt_enable(cdev, interrupts);

	/* route all interrupts to INT0 */

@@ -1991,8 +1991,17 @@ static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
{
	struct m_can_classdev *cdev = container_of(timer, struct
						   m_can_classdev, hrtimer);
	int ret;

	m_can_isr(0, cdev->net);
	if (cdev->can.state == CAN_STATE_BUS_OFF ||
	    cdev->can.state == CAN_STATE_STOPPED)
		return HRTIMER_NORESTART;

	ret = m_can_interrupt_handler(cdev);

	/* On error or if napi is scheduled to read, stop the timer */
	if (ret < 0 || napi_is_scheduled(&cdev->napi))
		return HRTIMER_NORESTART;

	hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));

@@ -2052,7 +2061,7 @@ static int m_can_open(struct net_device *dev)
	/* start the m_can controller */
	err = m_can_start(dev);
	if (err)
		goto exit_irq_fail;
		goto exit_start_fail;

	if (!cdev->is_peripheral)
		napi_enable(&cdev->napi);

@@ -2061,6 +2070,9 @@ static int m_can_open(struct net_device *dev)

	return 0;

exit_start_fail:
	if (cdev->is_peripheral || dev->irq)
		free_irq(dev->irq, dev);
exit_irq_fail:
	if (cdev->is_peripheral)
		destroy_workqueue(cdev->tx_wq);

@@ -2172,7 +2184,7 @@ static int m_can_set_coalesce(struct net_device *dev,
	return 0;
}

static const struct ethtool_ops m_can_ethtool_ops = {
static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
		ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
		ETHTOOL_COALESCE_TX_USECS_IRQ |

@@ -2183,18 +2195,20 @@ static const struct ethtool_ops m_can_ethtool_ops = {
	.set_coalesce = m_can_set_coalesce,
};

static const struct ethtool_ops m_can_ethtool_ops_polling = {
static const struct ethtool_ops m_can_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

static int register_m_can_dev(struct net_device *dev)
static int register_m_can_dev(struct m_can_classdev *cdev)
{
	struct net_device *dev = cdev->net;

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;
	if (dev->irq)
		dev->ethtool_ops = &m_can_ethtool_ops;
	if (dev->irq && cdev->is_peripheral)
		dev->ethtool_ops = &m_can_ethtool_ops_coalescing;
	else
		dev->ethtool_ops = &m_can_ethtool_ops_polling;
		dev->ethtool_ops = &m_can_ethtool_ops;

	return register_candev(dev);
}

@@ -2380,7 +2394,7 @@ int m_can_class_register(struct m_can_classdev *cdev)
	if (ret)
		goto rx_offload_del;

	ret = register_m_can_dev(cdev->net);
	ret = register_m_can_dev(cdev);
	if (ret) {
		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
			cdev->net->name, ret);

@@ -2427,12 +2441,15 @@ int m_can_class_suspend(struct device *dev)
		netif_device_detach(ndev);

		/* leave the chip running with rx interrupt enabled if it is
		 * used as a wake-up source.
		 * used as a wake-up source. Coalescing needs to be reset then,
		 * the timer is cancelled here, interrupts are done in resume.
		 */
		if (cdev->pm_wake_source)
		if (cdev->pm_wake_source) {
			hrtimer_cancel(&cdev->hrtimer);
			m_can_write(cdev, M_CAN_IE, IR_RF0N);
		else
		} else {
			m_can_stop(ndev);
		}

		m_can_clk_stop(cdev);
	}

@@ -2462,6 +2479,13 @@ int m_can_class_resume(struct device *dev)
			return ret;

		if (cdev->pm_wake_source) {
			/* Restore active interrupts but disable coalescing as
			 * we may have missed important waterlevel interrupts
			 * between suspend and resume. Timers are already
			 * stopped in suspend. Here we enable all interrupts
			 * again.
			 */
			cdev->active_interrupts |= IR_RF0N | IR_TEFN;
			m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
		} else {
			ret = m_can_start(ndev);

@@ -752,7 +752,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
	int ret;

	/* Force wakeup interrupt to wake device, but don't execute IST */
	disable_irq(spi->irq);
	disable_irq_nosync(spi->irq);
	mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);

	/* Wait for oscillator startup timer after wake up */

@@ -97,7 +97,16 @@ void can_ram_get_layout(struct can_ram_layout *layout,
	if (ring) {
		u8 num_rx_coalesce = 0, num_tx_coalesce = 0;

		num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
		/* If the ring parameters have been configured in
		 * CAN-CC mode, but we are in CAN-FD mode now,
		 * they might be too big. Use the default CAN-FD values
		 * in this case.
		 */
		num_rx = ring->rx_pending;
		if (num_rx > layout->max_rx)
			num_rx = layout->default_rx;

		num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);

		/* The ethtool doc says:
		 * To disable coalescing, set usecs = 0 and max_frames = 1.

@@ -290,7 +290,7 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
	const struct mcp251xfd_rx_ring *rx_ring;
	u16 base = 0, ram_used;
	u8 fifo_nr = 1;
	int i;
	int err = 0, i;

	netdev_reset_queue(priv->ndev);

@@ -386,10 +386,18 @@ int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
		netdev_err(priv->ndev,
			   "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
			   ram_used, MCP251XFD_RAM_SIZE);
		return -ENOMEM;
		err = -ENOMEM;
	}

	return 0;
	if (priv->tx_obj_num_coalesce_irq &&
	    priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
		netdev_err(priv->ndev,
			   "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
			   priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
		err = -EINVAL;
	}

	return err;
}

void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)

@@ -469,11 +477,25 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)

	/* switching from CAN-2.0 to CAN-FD mode or vice versa */
	if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
		const struct ethtool_ringparam ring = {
			.rx_pending = priv->rx_obj_num,
			.tx_pending = priv->tx->obj_num,
		};
		const struct ethtool_coalesce ec = {
			.rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
			.rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq,
			.tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
			.tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq,
		};
		struct can_ram_layout layout;

		can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
		priv->rx_obj_num = layout.default_rx;
		tx_ring->obj_num = layout.default_tx;
		can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);

		priv->rx_obj_num = layout.cur_rx;
		priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;

		tx_ring->obj_num = layout.cur_tx;
		priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
	}

	if (fd_mode) {

@@ -36,7 +36,7 @@
#define VSC73XX_BLOCK_ANALYZER	0x2	/* Only subblock 0 */
#define VSC73XX_BLOCK_MII	0x3	/* Subblocks 0 and 1 */
#define VSC73XX_BLOCK_MEMINIT	0x3	/* Only subblock 2 */
#define VSC73XX_BLOCK_CAPTURE	0x4	/* Only subblock 2 */
#define VSC73XX_BLOCK_CAPTURE	0x4	/* Subblocks 0-4, 6, 7 */
#define VSC73XX_BLOCK_ARBITER	0x5	/* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM	0x7	/* Only subblock 0 */

@@ -410,13 +410,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
		break;

	case VSC73XX_BLOCK_MII:
	case VSC73XX_BLOCK_CAPTURE:
	case VSC73XX_BLOCK_ARBITER:
		switch (subblock) {
		case 0 ... 1:
			return 1;
		}
		break;
	case VSC73XX_BLOCK_CAPTURE:
		switch (subblock) {
		case 0 ... 4:
		case 6 ... 7:
			return 1;
		}
		break;
	}

	return 0;

@@ -318,6 +318,7 @@ enum ice_vsi_state {
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_REBUILD_PENDING,
	ICE_VSI_STATE_NBITS		/* must be last */
};

@@ -411,6 +412,7 @@ struct ice_vsi {
	struct ice_tx_ring **xdp_rings;	/* XDP ring array */
	u16 num_xdp_txq;		/* Used XDP queues */
	u8 xdp_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	struct mutex xdp_state_lock;

	struct net_device **target_netdevs;

@@ -190,16 +190,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
				   NULL);
	ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx)
		tx_ring->q_vector = NULL;
	}
	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
				   NULL);

	ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx)
		rx_ring->q_vector = NULL;
	}

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)

@@ -447,6 +447,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)

	ice_vsi_free_stats(vsi);
	ice_vsi_free_arrays(vsi);
	mutex_destroy(&vsi->xdp_state_lock);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);
}

@@ -626,6 +627,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);

	mutex_init(&vsi->xdp_state_lock);

unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;

@@ -2286,9 +2289,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)

	ice_vsi_map_rings_to_vectors(vsi);

	/* Associate q_vector rings to napi */
	ice_vsi_set_napi_queues(vsi);

	vsi->stat_offsets_loaded = false;

	/* ICE_VSI_CTRL does not need RSS so skip RSS processing */

@@ -2426,7 +2426,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
			vsi->vsi_num, err);

	if (ice_is_xdp_ena_vsi(vsi))
	if (vsi->xdp_rings)
		/* return value check can be skipped here, it always returns
		 * 0 if reset is in progress
		 */

@@ -2528,7 +2528,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_write_itr(&q_vector->tx, 0);
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			if (ice_is_xdp_ena_vsi(vsi)) {
			if (vsi->xdp_rings) {
				u32 xdp_txq = txq + vsi->num_xdp_txq;

				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);

@@ -2628,6 +2628,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_clear_napi_queues(vsi);
	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);

@@ -2671,8 +2672,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 */
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return;
	bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);

	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);

@@ -2680,134 +2680,70 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_vsi_close(vsi);
			already_down = test_bit(ICE_VSI_DOWN, vsi->state);
			if (!already_down)
				ice_vsi_close(vsi);

			if (!locked)
				rtnl_unlock();
		} else {
		} else if (!already_down) {
			ice_vsi_close(vsi);
		}
	} else if (vsi->type == ICE_VSI_CTRL) {
	} else if (vsi->type == ICE_VSI_CTRL && !already_down) {
		ice_vsi_close(vsi);
	}
}

/**
 * __ice_queue_set_napi - Set the napi instance for the queue
 * @dev: device to which NAPI and queue belong
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context
 * @locked: is the rtnl_lock already held
 *
 * Set the napi instance for the queue. Caller indicates the lock status.
 */
static void
__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
		     enum netdev_queue_type type, struct napi_struct *napi,
		     bool locked)
{
	if (!locked)
		rtnl_lock();
	netif_queue_set_napi(dev, queue_index, type, napi);
	if (!locked)
		rtnl_unlock();
}

/**
 * ice_queue_set_napi - Set the napi instance for the queue
 * @vsi: VSI being configured
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context
 *
 * Set the napi instance for the queue. The rtnl lock state is derived from the
 * execution path.
 */
void
ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
		   enum netdev_queue_type type, struct napi_struct *napi)
{
	struct ice_pf *pf = vsi->back;

	if (!vsi->netdev)
		return;

	if (current_work() == &pf->serv_task ||
	    test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
	    test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state))
		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
				     false);
	else
		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
				     true);
}

/**
 * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
 * @q_vector: q_vector pointer
 * @locked: is the rtnl_lock already held
 *
 * Associate the q_vector napi with all the queue[s] on the vector.
 * Caller indicates the lock status.
 */
void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
{
	struct ice_rx_ring *rx_ring;
	struct ice_tx_ring *tx_ring;

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		__ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
				     NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
				     locked);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		__ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
				     NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
				     locked);
	/* Also set the interrupt number for the NAPI */
	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}

/**
 * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
 * @q_vector: q_vector pointer
 *
 * Associate the q_vector napi with all the queue[s] on the vector
 */
void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
{
	struct ice_rx_ring *rx_ring;
	struct ice_tx_ring *tx_ring;

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
				   NETDEV_QUEUE_TYPE_RX, &q_vector->napi);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
				   NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
	/* Also set the interrupt number for the NAPI */
	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}

/**
 * ice_vsi_set_napi_queues
 * ice_vsi_set_napi_queues - associate netdev queues with napi
 * @vsi: VSI pointer
 *
 * Associate queue[s] with napi for all vectors
 * Associate queue[s] with napi for all vectors.
 * The caller must hold rtnl_lock.
 */
void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
	int i;
	struct net_device *netdev = vsi->netdev;
	int q_idx, v_idx;

	if (!vsi->netdev)
	if (!netdev)
		return;

	ice_for_each_q_vector(vsi, i)
		ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
	ice_for_each_rxq(vsi, q_idx)
		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
				     &vsi->rx_rings[q_idx]->q_vector->napi);

	ice_for_each_txq(vsi, q_idx)
		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
				     &vsi->tx_rings[q_idx]->q_vector->napi);
	/* Also set the interrupt number for the NAPI */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];

		netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
	}
}

/**
 * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
 * @vsi: VSI pointer
 *
 * Clear the association between all VSI queues queue[s] and napi.
 * The caller must hold rtnl_lock.
 */
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int q_idx;

	if (!netdev)
		return;

	ice_for_each_txq(vsi, q_idx)
		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);

	ice_for_each_rxq(vsi, q_idx)
		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
}

/**

@@ -3039,19 +2975,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
		return -EINVAL;

	mutex_lock(&vsi->xdp_state_lock);

	ret = ice_vsi_realloc_stat_arrays(vsi);
	if (ret)
		goto err_vsi_cfg;
		goto unlock;

	ice_vsi_decfg(vsi);
	ret = ice_vsi_cfg_def(vsi);
	if (ret)
		goto err_vsi_cfg;
		goto unlock;

	coalesce = kcalloc(vsi->num_q_vectors,
			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
	if (!coalesce)
		return -ENOMEM;
	if (!coalesce) {
		ret = -ENOMEM;
		goto decfg;
	}

	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

@@ -3059,22 +2999,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
	if (ret) {
		if (vsi_flags & ICE_VSI_FLAG_INIT) {
			ret = -EIO;
			goto err_vsi_cfg_tc_lan;
			goto free_coalesce;
		}

		kfree(coalesce);
		return ice_schedule_reset(pf, ICE_RESET_PFR);
		ret = ice_schedule_reset(pf, ICE_RESET_PFR);
		goto free_coalesce;
	}

	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
	kfree(coalesce);
	clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);

	return 0;

err_vsi_cfg_tc_lan:
	ice_vsi_decfg(vsi);
free_coalesce:
	kfree(coalesce);
err_vsi_cfg:
decfg:
	if (ret)
		ice_vsi_decfg(vsi);
unlock:
	mutex_unlock(&vsi->xdp_state_lock);
	return ret;
}

@@ -44,16 +44,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);

void
ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
		   enum netdev_queue_type type, struct napi_struct *napi);

void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);

void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);

void ice_vsi_set_napi_queues(struct ice_vsi *vsi);

void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);

int ice_vsi_release(struct ice_vsi *vsi);

void ice_vsi_close(struct ice_vsi *vsi);

@@ -608,11 +608,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))

@@ -3001,8 +3005,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	bool if_running = netif_running(vsi->netdev);
	int ret = 0, xdp_ring_err = 0;
	bool if_running;

	if (prog && !prog->aux->xdp_has_frags) {
		if (frame_size > ice_max_xdp_frame_size(vsi)) {

@@ -3013,13 +3017,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
	}

	/* hot swap progs and avoid toggling link */
	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
	if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
	    test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
		ice_vsi_assign_bpf_prog(vsi, prog);
		return 0;
	}

	if_running = netif_running(vsi->netdev) &&
		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);

	/* need to stop netdev while setting up the program for Rx rings */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
	if (if_running) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");

@@ -3085,21 +3093,28 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vsi->type != ICE_VSI_PF) {
		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
		return -EINVAL;
	}

	mutex_lock(&vsi->xdp_state_lock);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
		ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
		break;
	case XDP_SETUP_XSK_POOL:
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
		ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
		break;
	default:
		return -EINVAL;
		ret = -EINVAL;
	}

	mutex_unlock(&vsi->xdp_state_lock);
	return ret;
}

/**

@@ -3555,11 +3570,9 @@ static void ice_napi_add(struct ice_vsi *vsi)
	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx) {
	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll);
		__ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
	}
}

/**

@@ -5537,7 +5550,9 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
		rtnl_lock();
		ice_vsi_set_napi_queues(pf->vsi[v]);
		rtnl_unlock();
	}

	ret = ice_req_irq_msix_misc(pf);

@@ -5551,8 +5566,12 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)

err_reinit:
	while (v--)
		if (pf->vsi[v])
		if (pf->vsi[v]) {
			rtnl_lock();
			ice_vsi_clear_napi_queues(pf->vsi[v]);
			rtnl_unlock();
			ice_vsi_free_q_vectors(pf->vsi[v]);
		}

	return ret;
}

@@ -5617,6 +5636,9 @@ static int ice_suspend(struct device *dev)
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		rtnl_lock();
		ice_vsi_clear_napi_queues(pf->vsi[v]);
		rtnl_unlock();
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_clear_interrupt_scheme(pf);

@@ -7230,7 +7252,7 @@ int ice_down(struct ice_vsi *vsi)
	if (tx_err)
		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, tx_err);
	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
	if (!tx_err && vsi->xdp_rings) {
		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
		if (tx_err)
			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",

@@ -7247,7 +7269,7 @@ int ice_down(struct ice_vsi *vsi)
	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	if (ice_is_xdp_ena_vsi(vsi))
	if (vsi->xdp_rings)
		ice_for_each_xdp_txq(vsi, i)
			ice_clean_tx_ring(vsi->xdp_rings[i]);

@@ -7452,6 +7474,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
		if (err)
			goto err_set_qs;

		ice_vsi_set_napi_queues(vsi);
	}

	err = ice_up_complete(vsi);

@@ -7589,6 +7613,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
 */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool dvm;

@@ -7731,6 +7756,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
		ice_rebuild_arfs(pf);
	}

	if (vsi && vsi->netdev)
		netif_device_attach(vsi->netdev);

	ice_update_pf_netdev_link(pf);

	/* tell the firmware we are up */

@@ -39,7 +39,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
	if (vsi->xdp_rings)
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}

@@ -52,7 +52,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi))
	if (vsi->xdp_rings)
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

@@ -165,7 +165,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int fail = 0;
	int err;

@@ -176,13 +175,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	synchronize_net();
	netif_carrier_off(vsi->netdev);
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

@@ -194,7 +186,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (!fail)
		fail = err;
	if (ice_is_xdp_ena_vsi(vsi)) {
	if (vsi->xdp_rings) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));

@@ -261,7 +253,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
		netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
		netif_carrier_on(vsi->netdev);
	}
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return fail;
}

@@ -390,7 +381,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
	if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
		     ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

@@ -6960,10 +6960,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)

static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
	const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
			  TSINTR_TT0 | TSINTR_TT1 |
			  TSINTR_AUTT0 | TSINTR_AUTT1);
	struct e1000_hw *hw = &adapter->hw;
	u32 tsicr = rd32(E1000_TSICR);
	struct ptp_clock_event event;

	if (hw->mac.type == e1000_82580) {
		/* 82580 has a hardware bug that requires an explicit
		 * write to clear the TimeSync interrupt cause.
		 */
		wr32(E1000_TSICR, tsicr & mask);
	}

	if (tsicr & TSINTR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)

@@ -7413,6 +7413,7 @@ static void igc_io_resume(struct pci_dev *pdev)
	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			rtnl_unlock();
			netdev_err(netdev, "igc_open failed after reset\n");
			return;
		}

@@ -1442,18 +1442,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
	vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
			    rule->cookie, false);

	vcap_free_rule(rule);

	/* Check that the rule has been freed: tricky to access since this
	 * memory should not be accessible anymore
	 */
	KUNIT_EXPECT_PTR_NE(test, NULL, rule);
	ret = list_empty(&rule->keyfields);
	KUNIT_EXPECT_EQ(test, true, ret);
	ret = list_empty(&rule->actionfields);
	KUNIT_EXPECT_EQ(test, true, ret);

	vcap_del_rule(&test_vctrl, &test_netdev, id);
	ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
	KUNIT_EXPECT_EQ(test, 0, ret);
}

static void vcap_api_set_rule_counter_test(struct kunit *test)

Some files were not shown because too many files have changed in this diff.