You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull yet more networking updates from David Miller:
1) Various fixes to the new Redpine Signals wireless driver, from
Fariya Fatima.
2) L2TP PPP connect code takes PMTU from the wrong socket, fix from
Dmitry Petukhov.
3) UFO and TSO packets differ in whether they include the protocol
header in gso_size, account for that in skb_gso_transport_seglen().
From Florian Westphal.
4) If VLAN untagging fails, we double free the SKB in the bridging
output path. From Toshiaki Makita.
5) Several call sites of sk->sk_data_ready() were referencing an SKB
just added to the socket receive queue in order to calculate the
second argument via skb->len. This is dangerous because the moment
the skb is added to the receive queue it can be consumed in another
context and freed up.
It turns out also that none of the sk->sk_data_ready()
implementations even care about this second argument.
So just kill it off and thus fix all these use-after-free bugs as a
side effect.
6) Fix inverted test in tcp_v6_send_response(), from Lorenzo Colitti.
7) pktgen needs to do locking properly for LLTX devices, from Daniel
Borkmann.
8) xen-netfront driver initializes TX array entries in RX loop :-) From
Vincenzo Maffione.
9) After refactoring, some tunnel drivers allow a tunnel to be
configured on top of itself. Fix from Nicolas Dichtel.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
vti: don't allow to add the same tunnel twice
gre: don't allow to add the same tunnel twice
drivers: net: xen-netfront: fix array initialization bug
pktgen: be friendly to LLTX devices
r8152: check RTL8152_UNPLUG
net: sun4i-emac: add promiscuous support
net/apne: replace IS_ERR and PTR_ERR with PTR_ERR_OR_ZERO
net: ipv6: Fix oif in TCP SYN+ACK route lookup.
drivers: net: cpsw: enable interrupts after napi enable and clearing previous interrupts
drivers: net: cpsw: discard all packets received when interface is down
net: Fix use after free by removing length arg from sk_data_ready callbacks.
Drivers: net: hyperv: Address UDP checksum issues
Drivers: net: hyperv: Negotiate suitable ndis version for offload support
Drivers: net: hyperv: Allocate memory for all possible per-packet information
bridge: Fix double free and memory leak around br_allowed_ingress
bonding: Remove debug_fs files when module init fails
i40evf: program RSS LUT correctly
i40evf: remove open-coded skb_cow_head
ixgb: remove open-coded skb_cow_head
igbvf: remove open-coded skb_cow_head
...
This commit is contained in:
@@ -4492,6 +4492,7 @@ static int __init bonding_init(void)
|
||||
out:
|
||||
return res;
|
||||
err:
|
||||
bond_destroy_debugfs();
|
||||
bond_netlink_fini();
|
||||
err_link:
|
||||
unregister_pernet_subsys(&bond_net_ops);
|
||||
|
||||
@@ -560,9 +560,7 @@ static struct net_device *apne_dev;
|
||||
static int __init apne_module_init(void)
|
||||
{
|
||||
apne_dev = apne_probe(-1);
|
||||
if (IS_ERR(apne_dev))
|
||||
return PTR_ERR(apne_dev);
|
||||
return 0;
|
||||
return PTR_ERR_OR_ZERO(apne_dev);
|
||||
}
|
||||
|
||||
static void __exit apne_module_exit(void)
|
||||
|
||||
@@ -268,15 +268,6 @@ static unsigned int emac_setup(struct net_device *ndev)
|
||||
writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN,
|
||||
db->membase + EMAC_TX_MODE_REG);
|
||||
|
||||
/* set up RX */
|
||||
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
|
||||
|
||||
writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
|
||||
EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
|
||||
EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
|
||||
EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
|
||||
db->membase + EMAC_RX_CTL_REG);
|
||||
|
||||
/* set MAC */
|
||||
/* set MAC CTL0 */
|
||||
reg_val = readl(db->membase + EMAC_MAC_CTL0_REG);
|
||||
@@ -309,6 +300,26 @@ static unsigned int emac_setup(struct net_device *ndev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void emac_set_rx_mode(struct net_device *ndev)
|
||||
{
|
||||
struct emac_board_info *db = netdev_priv(ndev);
|
||||
unsigned int reg_val;
|
||||
|
||||
/* set up RX */
|
||||
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
|
||||
|
||||
if (ndev->flags & IFF_PROMISC)
|
||||
reg_val |= EMAC_RX_CTL_PASS_ALL_EN;
|
||||
else
|
||||
reg_val &= ~EMAC_RX_CTL_PASS_ALL_EN;
|
||||
|
||||
writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN |
|
||||
EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN |
|
||||
EMAC_RX_CTL_ACCEPT_MULTICAST_EN |
|
||||
EMAC_RX_CTL_ACCEPT_BROADCAST_EN,
|
||||
db->membase + EMAC_RX_CTL_REG);
|
||||
}
|
||||
|
||||
static unsigned int emac_powerup(struct net_device *ndev)
|
||||
{
|
||||
struct emac_board_info *db = netdev_priv(ndev);
|
||||
@@ -782,6 +793,7 @@ static const struct net_device_ops emac_netdev_ops = {
|
||||
.ndo_stop = emac_stop,
|
||||
.ndo_start_xmit = emac_start_xmit,
|
||||
.ndo_tx_timeout = emac_timeout,
|
||||
.ndo_set_rx_mode = emac_set_rx_mode,
|
||||
.ndo_do_ioctl = emac_ioctl,
|
||||
.ndo_change_mtu = eth_change_mtu,
|
||||
.ndo_validate_addr = eth_validate_addr,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -2682,14 +2682,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
|
||||
u32 cmd_length = 0;
|
||||
u16 ipcse = 0, tucse, mss;
|
||||
u8 ipcss, ipcso, tucss, tucso, hdr_len;
|
||||
int err;
|
||||
|
||||
if (skb_is_gso(skb)) {
|
||||
if (skb_header_cloned(skb)) {
|
||||
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
int err;
|
||||
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
|
||||
mss = skb_shinfo(skb)->gso_size;
|
||||
|
||||
@@ -5100,16 +5100,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
|
||||
u32 cmd_length = 0;
|
||||
u16 ipcse = 0, mss;
|
||||
u8 ipcss, ipcso, tucss, tucso, hdr_len;
|
||||
int err;
|
||||
|
||||
if (!skb_is_gso(skb))
|
||||
return 0;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
|
||||
mss = skb_shinfo(skb)->gso_size;
|
||||
|
||||
@@ -1114,20 +1114,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
|
||||
u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
|
||||
{
|
||||
u32 cd_cmd, cd_tso_len, cd_mss;
|
||||
struct ipv6hdr *ipv6h;
|
||||
struct tcphdr *tcph;
|
||||
struct iphdr *iph;
|
||||
u32 l4len;
|
||||
int err;
|
||||
struct ipv6hdr *ipv6h;
|
||||
|
||||
if (!skb_is_gso(skb))
|
||||
return 0;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (protocol == htons(ETH_P_IP)) {
|
||||
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
|
||||
|
||||
@@ -1412,6 +1412,14 @@ restart_watchdog:
|
||||
schedule_work(&adapter->adminq_task);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40evf_configure_rss - increment to next available tx queue
|
||||
* @adapter: board private structure
|
||||
* @j: queue counter
|
||||
*
|
||||
* Helper function for RSS programming to increment through available
|
||||
* queues. Returns the next queue value.
|
||||
**/
|
||||
static int next_queue(struct i40evf_adapter *adapter, int j)
|
||||
{
|
||||
j += 1;
|
||||
@@ -1451,10 +1459,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
|
||||
/* Populate the LUT with max no. of queues in round robin fashion */
|
||||
j = adapter->vsi_res->num_queue_pairs;
|
||||
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
|
||||
lut = next_queue(adapter, j);
|
||||
lut |= next_queue(adapter, j) << 8;
|
||||
lut |= next_queue(adapter, j) << 16;
|
||||
lut |= next_queue(adapter, j) << 24;
|
||||
j = next_queue(adapter, j);
|
||||
lut = j;
|
||||
j = next_queue(adapter, j);
|
||||
lut |= j << 8;
|
||||
j = next_queue(adapter, j);
|
||||
lut |= j << 16;
|
||||
j = next_queue(adapter, j);
|
||||
lut |= j << 24;
|
||||
wr32(hw, I40E_VFQF_HLUT(i), lut);
|
||||
}
|
||||
i40e_flush(hw);
|
||||
|
||||
@@ -241,7 +241,6 @@ struct igb_ring {
|
||||
struct igb_tx_buffer *tx_buffer_info;
|
||||
struct igb_rx_buffer *rx_buffer_info;
|
||||
};
|
||||
unsigned long last_rx_timestamp;
|
||||
void *desc; /* descriptor ring memory */
|
||||
unsigned long flags; /* ring specific flags */
|
||||
void __iomem *tail; /* pointer to ring tail register */
|
||||
@@ -437,6 +436,7 @@ struct igb_adapter {
|
||||
struct hwtstamp_config tstamp_config;
|
||||
unsigned long ptp_tx_start;
|
||||
unsigned long last_rx_ptp_check;
|
||||
unsigned long last_rx_timestamp;
|
||||
spinlock_t tmreg_lock;
|
||||
struct cyclecounter cc;
|
||||
struct timecounter tc;
|
||||
@@ -533,20 +533,6 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
|
||||
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
|
||||
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
|
||||
struct sk_buff *skb);
|
||||
static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
|
||||
union e1000_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
|
||||
!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
|
||||
igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
|
||||
|
||||
/* Update the last_rx_timestamp timer in order to enable watchdog check
|
||||
* for error case of latched timestamp on a dropped packet.
|
||||
*/
|
||||
rx_ring->last_rx_timestamp = jiffies;
|
||||
}
|
||||
|
||||
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
|
||||
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
|
||||
#ifdef CONFIG_IGB_HWMON
|
||||
|
||||
@@ -4605,6 +4605,7 @@ static int igb_tso(struct igb_ring *tx_ring,
|
||||
struct sk_buff *skb = first->skb;
|
||||
u32 vlan_macip_lens, type_tucmd;
|
||||
u32 mss_l4len_idx, l4len;
|
||||
int err;
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
return 0;
|
||||
@@ -4612,11 +4613,9 @@ static int igb_tso(struct igb_ring *tx_ring,
|
||||
if (!skb_is_gso(skb))
|
||||
return 0;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
|
||||
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
|
||||
@@ -6955,7 +6954,9 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
|
||||
|
||||
igb_rx_checksum(rx_ring, rx_desc, skb);
|
||||
|
||||
igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
|
||||
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
|
||||
!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
|
||||
igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
|
||||
|
||||
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
|
||||
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
|
||||
|
||||
@@ -427,10 +427,8 @@ static void igb_ptp_overflow_check(struct work_struct *work)
|
||||
void igb_ptp_rx_hang(struct igb_adapter *adapter)
|
||||
{
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
struct igb_ring *rx_ring;
|
||||
u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
|
||||
unsigned long rx_event;
|
||||
int n;
|
||||
|
||||
if (hw->mac.type != e1000_82576)
|
||||
return;
|
||||
@@ -445,11 +443,8 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
|
||||
|
||||
/* Determine the most recent watchdog or rx_timestamp event */
|
||||
rx_event = adapter->last_rx_ptp_check;
|
||||
for (n = 0; n < adapter->num_rx_queues; n++) {
|
||||
rx_ring = adapter->rx_ring[n];
|
||||
if (time_after(rx_ring->last_rx_timestamp, rx_event))
|
||||
rx_event = rx_ring->last_rx_timestamp;
|
||||
}
|
||||
if (time_after(adapter->last_rx_timestamp, rx_event))
|
||||
rx_event = adapter->last_rx_timestamp;
|
||||
|
||||
/* Only need to read the high RXSTMP register to clear the lock */
|
||||
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
|
||||
@@ -540,6 +535,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
|
||||
regval |= (u64)rd32(E1000_RXSTMPH) << 32;
|
||||
|
||||
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
|
||||
|
||||
/* Update the last_rx_timestamp timer in order to enable watchdog check
|
||||
* for error case of latched timestamp on a dropped packet.
|
||||
*/
|
||||
adapter->last_rx_timestamp = jiffies;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -1910,20 +1910,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
|
||||
struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
|
||||
{
|
||||
struct e1000_adv_tx_context_desc *context_desc;
|
||||
unsigned int i;
|
||||
int err;
|
||||
struct igbvf_buffer *buffer_info;
|
||||
u32 info = 0, tu_cmd = 0;
|
||||
u32 mss_l4len_idx, l4len;
|
||||
unsigned int i;
|
||||
int err;
|
||||
|
||||
*hdr_len = 0;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err) {
|
||||
dev_err(&adapter->pdev->dev,
|
||||
"igbvf_tso returning an error\n");
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0) {
|
||||
dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
l4len = tcp_hdrlen(skb);
|
||||
|
||||
@@ -1220,17 +1220,15 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
|
||||
unsigned int i;
|
||||
u8 ipcss, ipcso, tucss, tucso, hdr_len;
|
||||
u16 ipcse, tucse, mss;
|
||||
int err;
|
||||
|
||||
if (likely(skb_is_gso(skb))) {
|
||||
struct ixgb_buffer *buffer_info;
|
||||
struct iphdr *iph;
|
||||
int err;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
|
||||
mss = skb_shinfo(skb)->gso_size;
|
||||
|
||||
@@ -811,6 +811,7 @@ enum ixgbe_state_t {
|
||||
__IXGBE_DISABLED,
|
||||
__IXGBE_REMOVING,
|
||||
__IXGBE_SERVICE_SCHED,
|
||||
__IXGBE_SERVICE_INITED,
|
||||
__IXGBE_IN_SFP_INIT,
|
||||
__IXGBE_PTP_RUNNING,
|
||||
__IXGBE_PTP_TX_IN_PROGRESS,
|
||||
|
||||
@@ -297,7 +297,8 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
|
||||
return;
|
||||
hw->hw_addr = NULL;
|
||||
e_dev_err("Adapter removed\n");
|
||||
ixgbe_service_event_schedule(adapter);
|
||||
if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
|
||||
ixgbe_service_event_schedule(adapter);
|
||||
}
|
||||
|
||||
void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
|
||||
@@ -6509,6 +6510,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
|
||||
struct sk_buff *skb = first->skb;
|
||||
u32 vlan_macip_lens, type_tucmd;
|
||||
u32 mss_l4len_idx, l4len;
|
||||
int err;
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
return 0;
|
||||
@@ -6516,11 +6518,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
|
||||
if (!skb_is_gso(skb))
|
||||
return 0;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
|
||||
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
|
||||
@@ -7077,8 +7077,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
|
||||
IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
|
||||
if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
|
||||
struct vlan_ethhdr *vhdr;
|
||||
if (skb_header_cloned(skb) &&
|
||||
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
|
||||
|
||||
if (skb_cow_head(skb, 0))
|
||||
goto out_drop;
|
||||
vhdr = (struct vlan_ethhdr *)skb->data;
|
||||
vhdr->h_vlan_TCI = htons(tx_flags >>
|
||||
@@ -8023,6 +8023,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
/* EEPROM */
|
||||
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
|
||||
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
|
||||
if (ixgbe_removed(hw->hw_addr)) {
|
||||
err = -EIO;
|
||||
goto err_ioremap;
|
||||
}
|
||||
/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
|
||||
if (!(eec & (1 << 8)))
|
||||
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
|
||||
@@ -8185,7 +8189,12 @@ skip_sriov:
|
||||
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
|
||||
(unsigned long) adapter);
|
||||
|
||||
if (ixgbe_removed(hw->hw_addr)) {
|
||||
err = -EIO;
|
||||
goto err_sw_init;
|
||||
}
|
||||
INIT_WORK(&adapter->service_task, ixgbe_service_task);
|
||||
set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
|
||||
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
|
||||
|
||||
err = ixgbe_init_interrupt_scheme(adapter);
|
||||
@@ -8494,6 +8503,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
|
||||
|
||||
skip_bad_vf_detection:
|
||||
#endif /* CONFIG_PCI_IOV */
|
||||
if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
rtnl_lock();
|
||||
netif_device_detach(netdev);
|
||||
|
||||
|
||||
@@ -421,6 +421,7 @@ enum ixbgevf_state_t {
|
||||
__IXGBEVF_DOWN,
|
||||
__IXGBEVF_DISABLED,
|
||||
__IXGBEVF_REMOVING,
|
||||
__IXGBEVF_WORK_INIT,
|
||||
};
|
||||
|
||||
struct ixgbevf_cb {
|
||||
|
||||
@@ -107,7 +107,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
|
||||
return;
|
||||
hw->hw_addr = NULL;
|
||||
dev_err(&adapter->pdev->dev, "Adapter removed\n");
|
||||
schedule_work(&adapter->watchdog_task);
|
||||
if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
|
||||
schedule_work(&adapter->watchdog_task);
|
||||
}
|
||||
|
||||
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
|
||||
@@ -2838,6 +2839,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
|
||||
struct sk_buff *skb = first->skb;
|
||||
u32 vlan_macip_lens, type_tucmd;
|
||||
u32 mss_l4len_idx, l4len;
|
||||
int err;
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
return 0;
|
||||
@@ -2845,11 +2847,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
|
||||
if (!skb_is_gso(skb))
|
||||
return 0;
|
||||
|
||||
if (skb_header_cloned(skb)) {
|
||||
int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = skb_cow_head(skb, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
|
||||
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
|
||||
@@ -3573,8 +3573,13 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
adapter->watchdog_timer.function = ixgbevf_watchdog;
|
||||
adapter->watchdog_timer.data = (unsigned long)adapter;
|
||||
|
||||
if (IXGBE_REMOVED(hw->hw_addr)) {
|
||||
err = -EIO;
|
||||
goto err_sw_init;
|
||||
}
|
||||
INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
|
||||
INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
|
||||
set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
|
||||
|
||||
err = ixgbevf_init_interrupt_scheme(adapter);
|
||||
if (err)
|
||||
@@ -3667,6 +3672,9 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
|
||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
||||
rtnl_lock();
|
||||
netif_device_detach(netdev);
|
||||
|
||||
|
||||
@@ -687,7 +687,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
|
||||
|
||||
cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
|
||||
|
||||
if (unlikely(status < 0)) {
|
||||
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
|
||||
/* the interface is going down, skbs are purged */
|
||||
dev_kfree_skb_any(skb);
|
||||
return;
|
||||
@@ -1201,8 +1201,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
|
||||
for_each_slave(priv, cpsw_slave_open, priv);
|
||||
|
||||
/* Add default VLAN */
|
||||
if (!priv->data.dual_emac)
|
||||
cpsw_add_default_vlan(priv);
|
||||
cpsw_add_default_vlan(priv);
|
||||
|
||||
if (!cpsw_common_res_usage_state(priv)) {
|
||||
/* setup tx dma to fixed prio and zero offset */
|
||||
@@ -1253,6 +1252,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
|
||||
cpsw_set_coalesce(ndev, &coal);
|
||||
}
|
||||
|
||||
napi_enable(&priv->napi);
|
||||
cpdma_ctlr_start(priv->dma);
|
||||
cpsw_intr_enable(priv);
|
||||
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
|
||||
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
|
||||
|
||||
prim_cpsw = cpsw_get_slave_priv(priv, 0);
|
||||
if (prim_cpsw->irq_enabled == false) {
|
||||
if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
|
||||
@@ -1261,12 +1266,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
|
||||
}
|
||||
}
|
||||
|
||||
napi_enable(&priv->napi);
|
||||
cpdma_ctlr_start(priv->dma);
|
||||
cpsw_intr_enable(priv);
|
||||
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
|
||||
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
|
||||
|
||||
if (priv->data.dual_emac)
|
||||
priv->slaves[priv->emac_port].open_stat = true;
|
||||
return 0;
|
||||
|
||||
@@ -747,6 +747,7 @@ struct ndis_oject_header {
|
||||
#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
|
||||
#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
|
||||
|
||||
#define VERSION_4_OFFLOAD_SIZE 22
|
||||
/*
|
||||
* New offload OIDs for NDIS 6
|
||||
*/
|
||||
|
||||
@@ -344,7 +344,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
|
||||
memset(init_packet, 0, sizeof(struct nvsp_message));
|
||||
|
||||
if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
|
||||
ndis_version = 0x00050001;
|
||||
ndis_version = 0x00060001;
|
||||
else
|
||||
ndis_version = 0x0006001e;
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user