You've already forked linux-rockchip
mirror of
https://github.com/armbian/linux-rockchip.git
synced 2026-01-06 11:08:10 -08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix object leak on IPSEC offload failure, from Steffen Klassert.
2) Fix range checks in ipset address range addition operations, from
Jozsef Kadlecsik.
3) Fix pernet ops unregistration order in ipset, from Florian Westphal.
4) Add missing netlink attribute policy for nl80211 packet pattern
attrs, from Peng Xu.
5) Fix PPP device destruction race, from Guillaume Nault.
6) Write marks get lost when BPF verifier processes R1=R2 register
assignments, causing incorrect liveness information and less state
pruning. Fix from Alexei Starovoitov.
7) Fix blackhole routes so that they are marked dead and therefore not
cached in sockets, otherwise IPSEC stops working. From Steffen
Klassert.
8) Fix broadcast handling of UDP socket early demux, from Paolo Abeni.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (37 commits)
cdc_ether: flag the u-blox TOBY-L2 and SARA-U2 as wwan
net: thunderx: mark expected switch fall-throughs in nicvf_main()
udp: fix bcast packet reception
netlink: do not set cb_running if dump's start() errs
ipv4: Fix traffic triggered IPsec connections.
ipv6: Fix traffic triggered IPsec connections.
ixgbe: incorrect XDP ring accounting in ethtool tx_frame param
net: ixgbe: Use new PCI_DEV_FLAGS_NO_RELAXED_ORDERING flag
Revert commit 1a8b6d76dc ("net:add one common config...")
ixgbe: fix masking of bits read from IXGBE_VXLANCTRL register
ixgbe: Return error when getting PHY address if PHY access is not supported
netfilter: xt_bpf: Fix XT_BPF_MODE_FD_PINNED mode of 'xt_bpf_info_v1'
netfilter: SYNPROXY: skip non-tcp packet in {ipv4, ipv6}_synproxy_hook
tipc: Unclone message at secondary destination lookup
tipc: correct initialization of skb list
gso: fix payload length when gso_size is zero
mlxsw: spectrum_router: Avoid expensive lookup during route removal
bpf: fix liveness marking
doc: Fix typo "8023.ad" in bonding documentation
ipv6: fix net.ipv6.conf.all.accept_dad behaviour for real
...
This commit is contained in:
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
|
||||
and packet type ID), so in a "gatewayed" configuration, all
|
||||
outgoing traffic will generally use the same device. Incoming
|
||||
traffic may also end up on a single device, but that is
|
||||
dependent upon the balancing policy of the peer's 8023.ad
|
||||
dependent upon the balancing policy of the peer's 802.3ad
|
||||
implementation. In a "local" configuration, traffic will be
|
||||
distributed across the devices in the bond.
|
||||
|
||||
|
||||
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
|
||||
and non-text memory will be made non-executable. This provides
|
||||
protection against certain security exploits (e.g. writing to text)
|
||||
|
||||
config ARCH_WANT_RELAX_ORDER
|
||||
bool
|
||||
|
||||
config ARCH_HAS_REFCOUNT
|
||||
bool
|
||||
help
|
||||
|
||||
@@ -44,7 +44,6 @@ config SPARC
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select CPU_NO_EFFICIENT_FFS
|
||||
select LOCKDEP_SMALL if LOCKDEP
|
||||
select ARCH_WANT_RELAX_ORDER
|
||||
|
||||
config SPARC32
|
||||
def_bool !64BIT
|
||||
|
||||
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
|
||||
return true;
|
||||
default:
|
||||
bpf_warn_invalid_xdp_action(action);
|
||||
/* fall through */
|
||||
case XDP_ABORTED:
|
||||
trace_xdp_exception(nic->netdev, prog, action);
|
||||
/* fall through */
|
||||
case XDP_DROP:
|
||||
/* Check if it's a recycled page, if not
|
||||
* unmap the DMA mapping.
|
||||
|
||||
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
|
||||
**/
|
||||
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
|
||||
{
|
||||
#ifndef CONFIG_SPARC
|
||||
u32 regval;
|
||||
u32 i;
|
||||
#endif
|
||||
s32 ret_val;
|
||||
|
||||
ret_val = ixgbe_start_hw_generic(hw);
|
||||
|
||||
#ifndef CONFIG_SPARC
|
||||
/* Disable relaxed ordering */
|
||||
for (i = 0; ((i < hw->mac.max_tx_queues) &&
|
||||
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
|
||||
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
|
||||
regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
|
||||
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
|
||||
}
|
||||
|
||||
for (i = 0; ((i < hw->mac.max_rx_queues) &&
|
||||
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
|
||||
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
|
||||
regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
|
||||
IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
|
||||
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
|
||||
}
|
||||
#endif
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
|
||||
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
|
||||
}
|
||||
IXGBE_WRITE_FLUSH(hw);
|
||||
|
||||
#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
|
||||
/* Disable relaxed ordering */
|
||||
for (i = 0; i < hw->mac.max_tx_queues; i++) {
|
||||
u32 regval;
|
||||
|
||||
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
|
||||
regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
|
||||
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
|
||||
}
|
||||
|
||||
for (i = 0; i < hw->mac.max_rx_queues; i++) {
|
||||
u32 regval;
|
||||
|
||||
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
|
||||
regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
|
||||
IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
|
||||
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
struct ixgbe_ring *temp_ring;
|
||||
int i, err = 0;
|
||||
int i, j, err = 0;
|
||||
u32 new_rx_count, new_tx_count;
|
||||
|
||||
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
|
||||
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
||||
}
|
||||
|
||||
/* allocate temporary buffer to store rings in */
|
||||
i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
|
||||
i = max_t(int, i, adapter->num_xdp_queues);
|
||||
i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
|
||||
adapter->num_rx_queues);
|
||||
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
|
||||
|
||||
if (!temp_ring) {
|
||||
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < adapter->num_xdp_queues; i++) {
|
||||
memcpy(&temp_ring[i], adapter->xdp_ring[i],
|
||||
for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
|
||||
memcpy(&temp_ring[i], adapter->xdp_ring[j],
|
||||
sizeof(struct ixgbe_ring));
|
||||
|
||||
temp_ring[i].count = new_tx_count;
|
||||
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
|
||||
memcpy(adapter->tx_ring[i], &temp_ring[i],
|
||||
sizeof(struct ixgbe_ring));
|
||||
}
|
||||
for (i = 0; i < adapter->num_xdp_queues; i++) {
|
||||
ixgbe_free_tx_resources(adapter->xdp_ring[i]);
|
||||
for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
|
||||
ixgbe_free_tx_resources(adapter->xdp_ring[j]);
|
||||
|
||||
memcpy(adapter->xdp_ring[i], &temp_ring[i],
|
||||
memcpy(adapter->xdp_ring[j], &temp_ring[i],
|
||||
sizeof(struct ixgbe_ring));
|
||||
}
|
||||
|
||||
|
||||
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
|
||||
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
|
||||
return;
|
||||
|
||||
vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
|
||||
vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
|
||||
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
|
||||
|
||||
if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
|
||||
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
|
||||
return ixgbe_ptp_set_ts_config(adapter, req);
|
||||
case SIOCGHWTSTAMP:
|
||||
return ixgbe_ptp_get_ts_config(adapter, req);
|
||||
case SIOCGMIIPHY:
|
||||
if (!adapter->hw.phy.ops.read_reg)
|
||||
return -EOPNOTSUPP;
|
||||
/* fall through */
|
||||
default:
|
||||
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
|
||||
}
|
||||
|
||||
@@ -3505,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
|
||||
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
|
||||
struct mlxsw_sp_fib *fib)
|
||||
{
|
||||
struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
|
||||
struct mlxsw_sp_lpm_tree *lpm_tree;
|
||||
|
||||
/* Aggregate prefix lengths across all virtual routers to make
|
||||
* sure we only have used prefix lengths in the LPM tree.
|
||||
*/
|
||||
mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
|
||||
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
|
||||
fib->proto);
|
||||
if (IS_ERR(lpm_tree))
|
||||
goto err_tree_get;
|
||||
mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
|
||||
|
||||
err_tree_get:
|
||||
if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
|
||||
return;
|
||||
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
|
||||
|
||||
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
|
||||
|
||||
static int ppp_dev_init(struct net_device *dev)
|
||||
{
|
||||
struct ppp *ppp;
|
||||
|
||||
netdev_lockdep_set_classes(dev);
|
||||
|
||||
ppp = netdev_priv(dev);
|
||||
/* Let the netdevice take a reference on the ppp file. This ensures
|
||||
* that ppp_destroy_interface() won't run before the device gets
|
||||
* unregistered.
|
||||
*/
|
||||
atomic_inc(&ppp->file.refcnt);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
|
||||
wake_up_interruptible(&ppp->file.rwait);
|
||||
}
|
||||
|
||||
static void ppp_dev_priv_destructor(struct net_device *dev)
|
||||
{
|
||||
struct ppp *ppp;
|
||||
|
||||
ppp = netdev_priv(dev);
|
||||
if (atomic_dec_and_test(&ppp->file.refcnt))
|
||||
ppp_destroy_interface(ppp);
|
||||
}
|
||||
|
||||
static const struct net_device_ops ppp_netdev_ops = {
|
||||
.ndo_init = ppp_dev_init,
|
||||
.ndo_uninit = ppp_dev_uninit,
|
||||
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
|
||||
dev->tx_queue_len = 3;
|
||||
dev->type = ARPHRD_PPP;
|
||||
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
|
||||
dev->priv_destructor = ppp_dev_priv_destructor;
|
||||
netif_keep_dst(dev);
|
||||
}
|
||||
|
||||
|
||||
@@ -560,6 +560,7 @@ static const struct driver_info wwan_info = {
|
||||
#define NVIDIA_VENDOR_ID 0x0955
|
||||
#define HP_VENDOR_ID 0x03f0
|
||||
#define MICROSOFT_VENDOR_ID 0x045e
|
||||
#define UBLOX_VENDOR_ID 0x1546
|
||||
|
||||
static const struct usb_device_id products[] = {
|
||||
/* BLACKLIST !!
|
||||
@@ -868,6 +869,18 @@ static const struct usb_device_id products[] = {
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&zte_cdc_info,
|
||||
}, {
|
||||
/* U-blox TOBY-L2 */
|
||||
USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
/* U-blox SARA-U2 */
|
||||
USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
|
||||
USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
.driver_info = (unsigned long)&wwan_info,
|
||||
}, {
|
||||
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
|
||||
USB_CDC_PROTO_NONE),
|
||||
|
||||
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int bpf_obj_get_user(const char __user *pathname)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
|
||||
u32 key)
|
||||
{
|
||||
|
||||
@@ -108,9 +108,10 @@ struct ebt_table {
|
||||
|
||||
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
|
||||
~(__alignof__(struct _xt_align)-1))
|
||||
extern struct ebt_table *ebt_register_table(struct net *net,
|
||||
const struct ebt_table *table,
|
||||
const struct nf_hook_ops *);
|
||||
extern int ebt_register_table(struct net *net,
|
||||
const struct ebt_table *table,
|
||||
const struct nf_hook_ops *ops,
|
||||
struct ebt_table **res);
|
||||
extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
|
||||
const struct nf_hook_ops *);
|
||||
extern unsigned int ebt_do_table(struct sk_buff *skb,
|
||||
|
||||
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
|
||||
XT_BPF_MODE_FD_PINNED,
|
||||
XT_BPF_MODE_FD_ELF,
|
||||
};
|
||||
#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
|
||||
|
||||
struct xt_bpf_info_v1 {
|
||||
__u16 mode;
|
||||
|
||||
@@ -363,6 +363,7 @@ out:
|
||||
putname(pname);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_obj_get_user);
|
||||
|
||||
static void bpf_evict_inode(struct inode *inode)
|
||||
{
|
||||
|
||||
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
|
||||
{
|
||||
struct bpf_verifier_state *parent = state->parent;
|
||||
|
||||
if (regno == BPF_REG_FP)
|
||||
/* We don't need to worry about FP liveness because it's read-only */
|
||||
return;
|
||||
|
||||
while (parent) {
|
||||
/* if read wasn't screened by an earlier write ... */
|
||||
if (state->regs[regno].live & REG_LIVE_WRITTEN)
|
||||
@@ -2345,6 +2349,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
||||
* copy register state to dest reg
|
||||
*/
|
||||
regs[insn->dst_reg] = regs[insn->src_reg];
|
||||
regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
|
||||
} else {
|
||||
/* R1 = (u32) R2 */
|
||||
if (is_pointer_value(env, insn->src_reg)) {
|
||||
|
||||
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
|
||||
|
||||
static int __net_init broute_net_init(struct net *net)
|
||||
{
|
||||
net->xt.broute_table = ebt_register_table(net, &broute_table, NULL);
|
||||
return PTR_ERR_OR_ZERO(net->xt.broute_table);
|
||||
return ebt_register_table(net, &broute_table, NULL,
|
||||
&net->xt.broute_table);
|
||||
}
|
||||
|
||||
static void __net_exit broute_net_exit(struct net *net)
|
||||
|
||||
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
|
||||
|
||||
static int __net_init frame_filter_net_init(struct net *net)
|
||||
{
|
||||
net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter);
|
||||
return PTR_ERR_OR_ZERO(net->xt.frame_filter);
|
||||
return ebt_register_table(net, &frame_filter, ebt_ops_filter,
|
||||
&net->xt.frame_filter);
|
||||
}
|
||||
|
||||
static void __net_exit frame_filter_net_exit(struct net *net)
|
||||
|
||||
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
|
||||
|
||||
static int __net_init frame_nat_net_init(struct net *net)
|
||||
{
|
||||
net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat);
|
||||
return PTR_ERR_OR_ZERO(net->xt.frame_nat);
|
||||
return ebt_register_table(net, &frame_nat, ebt_ops_nat,
|
||||
&net->xt.frame_nat);
|
||||
}
|
||||
|
||||
static void __net_exit frame_nat_net_exit(struct net *net)
|
||||
|
||||
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
struct ebt_table *
|
||||
ebt_register_table(struct net *net, const struct ebt_table *input_table,
|
||||
const struct nf_hook_ops *ops)
|
||||
int ebt_register_table(struct net *net, const struct ebt_table *input_table,
|
||||
const struct nf_hook_ops *ops, struct ebt_table **res)
|
||||
{
|
||||
struct ebt_table_info *newinfo;
|
||||
struct ebt_table *t, *table;
|
||||
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
|
||||
repl->entries == NULL || repl->entries_size == 0 ||
|
||||
repl->counters != NULL || input_table->private != NULL) {
|
||||
BUGPRINT("Bad table data for ebt_register_table!!!\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Don't add one table to multiple lists. */
|
||||
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
|
||||
list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
|
||||
mutex_unlock(&ebt_mutex);
|
||||
|
||||
WRITE_ONCE(*res, table);
|
||||
|
||||
if (!ops)
|
||||
return table;
|
||||
return 0;
|
||||
|
||||
ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
|
||||
if (ret) {
|
||||
__ebt_unregister_table(net, table);
|
||||
return ERR_PTR(ret);
|
||||
*res = NULL;
|
||||
}
|
||||
|
||||
return table;
|
||||
return ret;
|
||||
free_unlock:
|
||||
mutex_unlock(&ebt_mutex);
|
||||
free_chainstack:
|
||||
@@ -1276,7 +1277,7 @@ free_newinfo:
|
||||
free_table:
|
||||
kfree(table);
|
||||
out:
|
||||
return ERR_PTR(ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void ebt_unregister_table(struct net *net, struct ebt_table *table,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user