Merge 5.10.97 into android12-5.10-lts

Changes in 5.10.97
	PCI: pciehp: Fix infinite loop in IRQ handler upon power fault
	net: ipa: fix atomic update in ipa_endpoint_replenish()
	net: ipa: use a bitmap for endpoint replenish_enabled
	net: ipa: prevent concurrent replenish
	Revert "drivers: bus: simple-pm-bus: Add support for probing simple bus only devices"
	KVM: x86: Forcibly leave nested virt when SMM state is toggled
	psi: Fix uaf issue when psi trigger is destroyed while being polled
	x86/mce: Add Xeon Sapphire Rapids to list of CPUs that support PPIN
	x86/cpu: Add Xeon Icelake-D to list of CPUs that support PPIN
	drm/vc4: hdmi: Make sure the device is powered with CEC
	cgroup-v1: Require capabilities to set release_agent
	net/mlx5e: Fix handling of wrong devices during bond netevent
	net/mlx5: Use del_timer_sync in fw reset flow of halting poll
	net/mlx5: E-Switch, Fix uninitialized variable modact
	ipheth: fix EOVERFLOW in ipheth_rcvbulk_callback
	net: amd-xgbe: ensure to reset the tx_timer_active flag
	net: amd-xgbe: Fix skb data length underflow
	fanotify: Fix stale file descriptor in copy_event_to_user()
	net: sched: fix use-after-free in tc_new_tfilter()
	rtnetlink: make sure to refresh master_dev/m_ops in __rtnl_newlink()
	cpuset: Fix the bug that subpart_cpus updated wrongly in update_cpumask()
	af_packet: fix data-race in packet_setsockopt / packet_setsockopt
	tcp: add missing tcp_skb_can_collapse() test in tcp_shift_skb_data()
	Linux 5.10.97

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I428a930b475ba1b15d4b1ad05dde7df36cec6405
Greg Kroah-Hartman committed on 2022-02-08 10:08:24 +01:00
30 changed files with 176 additions and 150 deletions

diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst

@@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger
 for the same psi metric can be specified. However for each trigger a separate
 file descriptor is required to be able to poll it separately from others,
 therefore for each trigger a separate open() syscall should be made even
-when opening the same psi interface file.
+when opening the same psi interface file. Write operations to a file descriptor
+with an already existing psi trigger will fail with EBUSY.
 
 Monitors activate only when system enters stall state for the monitored
 psi metric and deactivates upon exit from the stall state. While system is
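
For context, a psi trigger is armed from userspace by writing a threshold to a
pressure file and polling the fd for POLLPRI. Below is a minimal monitor sketch
following the usage psi.rst documents; the "some 150000 1000000" threshold
(150ms of stall per 1s window) is an arbitrary example value:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";	/* 150ms stall per 1s window */
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0) {
		perror("open");
		return 1;
	}
	/* One trigger per fd: a second write to this fd fails with EBUSY. */
	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
		perror("write");
		return 1;
	}
	fds.events = POLLPRI;
	while (poll(&fds, 1, -1) > 0) {
		if (fds.revents & POLLERR) {
			fprintf(stderr, "event source is gone\n");
			break;
		}
		printf("memory pressure event\n");
	}
	close(fds.fd);
	return 0;
}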

diff --git a/Makefile b/Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 96
+SUBLEVEL = 97
 EXTRAVERSION =
 NAME = Dare mighty things

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h

@@ -1285,6 +1285,7 @@ struct kvm_x86_ops {
 };
 
 struct kvm_x86_nested_ops {
+	void (*leave_nested)(struct kvm_vcpu *vcpu);
 	int (*check_events)(struct kvm_vcpu *vcpu);
 	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
 	int (*get_state)(struct kvm_vcpu *vcpu,

diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c

@@ -486,6 +486,8 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
 	case INTEL_FAM6_BROADWELL_X:
 	case INTEL_FAM6_SKYLAKE_X:
 	case INTEL_FAM6_ICELAKE_X:
+	case INTEL_FAM6_ICELAKE_D:
+	case INTEL_FAM6_SAPPHIRERAPIDS_X:
 	case INTEL_FAM6_XEON_PHI_KNL:
 	case INTEL_FAM6_XEON_PHI_KNM:

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c

@@ -783,8 +783,10 @@ void svm_free_nested(struct vcpu_svm *svm)
 /*
  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
  */
-void svm_leave_nested(struct vcpu_svm *svm)
+void svm_leave_nested(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	if (is_guest_mode(&svm->vcpu)) {
 		struct vmcb *hsave = svm->nested.hsave;
 		struct vmcb *vmcb = svm->vmcb;
@@ -1185,7 +1187,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
-		svm_leave_nested(svm);
+		svm_leave_nested(vcpu);
 		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
 		return 0;
 	}
@@ -1238,6 +1240,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
 	hsave->save = *save;
 
+	if (is_guest_mode(vcpu))
+		svm_leave_nested(vcpu);
+
 	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
 	load_nested_vmcb_control(svm, ctl);
 	nested_prepare_vmcb_control(svm);
@@ -1252,6 +1257,7 @@ out_free:
 }
 
 struct kvm_x86_nested_ops svm_nested_ops = {
+	.leave_nested = svm_leave_nested,
 	.check_events = svm_check_nested_events,
 	.get_nested_state_pages = svm_get_nested_state_pages,
 	.get_state = svm_get_nested_state,

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c

@@ -279,7 +279,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
 		if (!(efer & EFER_SVME)) {
-			svm_leave_nested(svm);
+			svm_leave_nested(vcpu);
 			svm_set_gif(svm, true);
 
 			/*

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h

@@ -393,7 +393,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 
 int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			 struct vmcb *nested_vmcb);
-void svm_leave_nested(struct vcpu_svm *svm);
+void svm_leave_nested(struct kvm_vcpu *vcpu);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct vcpu_svm *svm);

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c

@@ -6628,6 +6628,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 }
 
 struct kvm_x86_nested_ops vmx_nested_ops = {
+	.leave_nested = vmx_leave_nested,
 	.check_events = vmx_check_nested_events,
 	.hv_timer_pending = nested_vmx_preemption_timer_pending,
 	.get_state = vmx_get_nested_state,

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c

@@ -4391,6 +4391,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 			vcpu->arch.hflags |= HF_SMM_MASK;
 		else
 			vcpu->arch.hflags &= ~HF_SMM_MASK;
+
+		kvm_x86_ops.nested_ops->leave_nested(vcpu);
 		kvm_smm_changed(vcpu);
 	}

diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c

@@ -16,33 +16,7 @@
 
 static int simple_pm_bus_probe(struct platform_device *pdev)
 {
-	const struct device *dev = &pdev->dev;
-	struct device_node *np = dev->of_node;
-	const struct of_device_id *match;
-
-	/*
-	 * Allow user to use driver_override to bind this driver to a
-	 * transparent bus device which has a different compatible string
-	 * that's not listed in simple_pm_bus_of_match. We don't want to do any
-	 * of the simple-pm-bus tasks for these devices, so return early.
-	 */
-	if (pdev->driver_override)
-		return 0;
-
-	match = of_match_device(dev->driver->of_match_table, dev);
-	/*
-	 * These are transparent bus devices (not simple-pm-bus matches) that
-	 * have their child nodes populated automatically. So, don't need to
-	 * do anything more. We only match with the device if this driver is
-	 * the most specific match because we don't want to incorrectly bind to
-	 * a device that has a more specific driver.
-	 */
-	if (match && match->data) {
-		if (of_property_match_string(np, "compatible", match->compatible) == 0)
-			return 0;
-		else
-			return -ENODEV;
-	}
+	struct device_node *np = pdev->dev.of_node;
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -56,25 +30,14 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
 static int simple_pm_bus_remove(struct platform_device *pdev)
 {
-	const void *data = of_device_get_match_data(&pdev->dev);
-
-	if (pdev->driver_override || data)
-		return 0;
-
 	dev_dbg(&pdev->dev, "%s\n", __func__);
 
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
 
-#define ONLY_BUS	((void *) 1) /* Match if the device is only a bus. */
-
 static const struct of_device_id simple_pm_bus_of_match[] = {
 	{ .compatible = "simple-pm-bus", },
-	{ .compatible = "simple-bus",	.data = ONLY_BUS },
-	{ .compatible = "simple-mfd",	.data = ONLY_BUS },
-	{ .compatible = "isa",		.data = ONLY_BUS },
-	{ .compatible = "arm,amba-bus",	.data = ONLY_BUS },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);

diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c

@@ -1402,18 +1402,18 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 	u32 val;
 	int ret;
 
-	ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
-	if (ret)
-		return ret;
-
-	val = HDMI_READ(HDMI_CEC_CNTRL_5);
-	val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
-		 VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
-		 VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
-	val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
-	       ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
-
 	if (enable) {
+		ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+		if (ret)
+			return ret;
+
+		val = HDMI_READ(HDMI_CEC_CNTRL_5);
+		val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
+			 VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
+			 VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
+		val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
+		       ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
+
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
 			   VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
@@ -1439,7 +1439,10 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 		HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
 			   VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+
+		pm_runtime_put(&vc4_hdmi->pdev->dev);
 	}
+
 	return 0;
 }
@@ -1531,8 +1534,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
 	if (ret < 0)
 		goto err_delete_cec_adap;
 
-	pm_runtime_put(&vc4_hdmi->pdev->dev);
-
 	return 0;
 
 err_delete_cec_adap:

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c

@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
 		if (!channel->tx_ring)
 			break;
 
+		/* Deactivate the Tx timer */
 		del_timer_sync(&channel->tx_timer);
+		channel->tx_timer_active = 0;
 	}
 }
@@ -2557,6 +2559,14 @@ read_again:
 			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
 			len += buf2_len;
 
+			if (buf2_len > rdata->rx.buf.dma_len) {
+				/* Hardware inconsistency within the descriptors
+				 * that has resulted in a length underflow.
+				 */
+				error = 1;
+				goto skip_data;
+			}
+
 			if (!skb) {
 				skb = xgbe_create_skb(pdata, napi, rdata,
 						      buf1_len);
@@ -2586,8 +2596,10 @@ skip_data:
 		if (!last || context_next)
 			goto read_again;
 
-		if (!skb)
+		if (!skb || error) {
+			dev_kfree_skb(skb);
 			goto next_packet;
+		}
 
 		/* Be sure we don't exceed the configured MTU */
 		max_len = netdev->mtu + ETH_HLEN;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c

@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
 static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
 {
-	struct mlx5e_rep_priv *rpriv;
-	struct mlx5e_priv *priv;
-
-	/* A given netdev is not a representor or not a slave of LAG configuration */
-	if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
-		return false;
-
-	priv = netdev_priv(netdev);
-	rpriv = priv->ppriv;
-
-	/* Egress acl forward to vport is supported only non-uplink representor */
-	return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+	return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
 }
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
 	u16 fwd_vport_num;
 	int err;
 
-	if (!mlx5e_rep_is_lag_netdev(netdev))
-		return;
-
 	info = ptr;
 	lag_info = info->lower_state_info;
 	/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
 	struct net_device *lag_dev;
 	struct mlx5e_priv *priv;
 
-	if (!mlx5e_rep_is_lag_netdev(netdev))
-		return;
-
 	priv = netdev_priv(netdev);
 	rpriv = priv->ppriv;
 	lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
 				       unsigned long event, void *ptr)
 {
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_bond *bond;
+	struct mlx5e_priv *priv;
+
+	if (!mlx5e_rep_is_lag_netdev(netdev))
+		return NOTIFY_DONE;
+
+	bond = container_of(nb, struct mlx5e_rep_bond, nb);
+	priv = netdev_priv(netdev);
+	rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+	/* Verify VF representor is on the same device of the bond handling the netevent. */
+	if (rpriv->uplink_priv.bond != bond)
+		return NOTIFY_DONE;
 
 	switch (event) {
 	case NETDEV_CHANGELOWERSTATE:

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c

@@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
-	del_timer(&fw_reset->timer);
+	del_timer_sync(&fw_reset->timer);
 }
 
 static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c

@@ -292,7 +292,7 @@ static int
 create_chain_restore(struct fs_chain *chain)
 {
 	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
-	char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 	struct mlx5_fs_chains *chains = chain->chains;
 	enum mlx5e_tc_attr_to_reg chain_to_reg;
 	struct mlx5_modify_hdr *mod_hdr;

diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c

@@ -901,27 +901,35 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
 	struct gsi *gsi;
 	u32 backlog;
 
-	if (!endpoint->replenish_enabled) {
+	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
 		if (count)
 			atomic_add(count, &endpoint->replenish_saved);
 		return;
 	}
 
+	/* If already active, just update the backlog */
+	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
+		if (count)
+			atomic_add(count, &endpoint->replenish_backlog);
+		return;
+	}
+
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
+
+	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
 	if (count)
 		atomic_add(count, &endpoint->replenish_backlog);
 
 	return;
 
 try_again_later:
-	/* The last one didn't succeed, so fix the backlog */
-	backlog = atomic_inc_return(&endpoint->replenish_backlog);
-
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
+	/* The last one didn't succeed, so fix the backlog */
+	backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
@@ -941,7 +949,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	u32 max_backlog;
 	u32 saved;
 
-	endpoint->replenish_enabled = true;
+	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
 		atomic_add(saved, &endpoint->replenish_backlog);
 
@@ -955,7 +963,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
 {
 	u32 backlog;
 
-	endpoint->replenish_enabled = false;
+	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
 		atomic_add(backlog, &endpoint->replenish_saved);
 }
@@ -1472,7 +1480,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
 		/* RX transactions require a single TRE, so the maximum
 		 * backlog is the same as the maximum outstanding TREs.
 		 */
-		endpoint->replenish_enabled = false;
+		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 		atomic_set(&endpoint->replenish_saved,
 			   gsi_channel_tre_max(gsi, endpoint->channel_id));
 		atomic_set(&endpoint->replenish_backlog, 0);
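
The concurrency guard added above boils down to: one "active" bit elects a
single drainer, and everyone else only credits the backlog counter. Below is
an illustrative standalone rendering of that pattern using C11 atomics; it is
not the driver code, and replenish_one() here is a stand-in for posting one
receive buffer to the hardware:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag active = ATOMIC_FLAG_INIT;
static atomic_int backlog = 10;

static bool replenish_one(void)
{
	return true;	/* stand-in for posting one receive buffer */
}

static void replenish(int count)
{
	if (atomic_flag_test_and_set(&active)) {
		/* Someone else is draining; just record the new work. */
		atomic_fetch_add(&backlog, count);
		return;
	}

	/* Sole drainer: decrement only while the counter stays positive. */
	for (int old = atomic_load(&backlog); old > 0;
	     old = atomic_load(&backlog)) {
		if (!atomic_compare_exchange_weak(&backlog, &old, old - 1))
			continue;
		if (!replenish_one()) {
			atomic_fetch_add(&backlog, 1);
			break;
		}
	}

	atomic_flag_clear(&active);
	/* Like the patch, credit new work after dropping the active bit;
	 * the kernel relies on the next buffer completion to retry. */
	atomic_fetch_add(&backlog, count);
}

int main(void)
{
	replenish(5);
	printf("backlog now %d\n", atomic_load(&backlog));
	return 0;
}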

diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h

@@ -39,6 +39,19 @@ enum ipa_endpoint_name {
 
 #define IPA_ENDPOINT_MAX		32	/* Max supported by driver */
 
+/**
+ * enum ipa_replenish_flag:	RX buffer replenish flags
+ *
+ * @IPA_REPLENISH_ENABLED:	Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE:	Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT:	Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+	IPA_REPLENISH_ENABLED,
+	IPA_REPLENISH_ACTIVE,
+	IPA_REPLENISH_COUNT,	/* Number of flags (must be last) */
+};
+
 /**
  * struct ipa_endpoint - IPA endpoint information
  * @channel_id: EP's GSI channel
@@ -60,7 +73,7 @@ struct ipa_endpoint {
 	struct net_device *netdev;
 
 	/* Receive buffer replenishing for RX endpoints */
-	bool replenish_enabled;
+	DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
 	u32 replenish_ready;
 	atomic_t replenish_saved;
 	atomic_t replenish_backlog;

diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c

@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
 	if (tx_buf == NULL)
 		goto free_rx_urb;
 
-	rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+	rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
 				    GFP_KERNEL, &rx_urb->transfer_dma);
 	if (rx_buf == NULL)
 		goto free_tx_buf;
@@ -146,7 +146,7 @@ error_nomem:
 
 static void ipheth_free_urbs(struct ipheth_device *iphone)
 {
-	usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+	usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
 			  iphone->rx_urb->transfer_dma);
 	usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
 			  iphone->tx_urb->transfer_dma);
@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
 	usb_fill_bulk_urb(dev->rx_urb, udev,
 			  usb_rcvbulkpipe(udev, dev->bulk_in),
-			  dev->rx_buf, IPHETH_BUF_SIZE,
+			  dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
 			  ipheth_rcvbulk_callback,
 			  dev);
 	dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c

@@ -642,6 +642,8 @@ read_status:
 	 */
 	if (ctrl->power_fault_detected)
 		status &= ~PCI_EXP_SLTSTA_PFD;
+	else if (status & PCI_EXP_SLTSTA_PFD)
+		ctrl->power_fault_detected = true;
 
 	events |= status;
 	if (!events) {
@@ -651,7 +653,7 @@ read_status:
 	}
 
 	if (status) {
-		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
 
 		/*
 		 * In MSI mode, all event bits must be zero before the port
@@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
 	}
 
 	/* Check Power Fault Detected */
-	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
-		ctrl->power_fault_detected = 1;
+	if (events & PCI_EXP_SLTSTA_PFD) {
 		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
 		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
 				      PCI_EXP_SLTCTL_ATTN_IND_ON);

diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c

@@ -366,9 +366,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 	if (fanotify_is_perm_event(event->mask))
 		FANOTIFY_PERM(event)->fd = fd;
 
-	if (f)
-		fd_install(fd, f);
-
 	/* Event info records order is: dir fid + name, child fid */
 	if (fanotify_event_dir_fh_len(event)) {
 		info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
@@ -432,6 +429,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 		count -= ret;
 	}
 
+	if (f)
+		fd_install(fd, f);
+
 	return metadata.event_len;
 
 out_close_fd:

Some files were not shown because too many files have changed in this diff.