Merge 5.10.113 into android12-5.10-lts

Changes in 5.10.113
	etherdevice: Adjust ether_addr* prototypes to silence -Wstringop-overead
	mm: page_alloc: fix building error on -Werror=array-compare
	tracing: Dump stacktrace trigger to the corresponding instance
	perf tools: Fix segfault accessing sample_id xyarray
	gfs2: assign rgrp glock before compute_bitstructs
	net/sched: cls_u32: fix netns refcount changes in u32_change()
	ALSA: usb-audio: Clear MIDI port active flag after draining
	ALSA: hda/realtek: Add quirk for Clevo NP70PNP
	dm: fix mempool NULL pointer race when completing IO
	ASoC: atmel: Remove system clock tree configuration for at91sam9g20ek
	ASoC: msm8916-wcd-digital: Check failure for devm_snd_soc_register_component
	ASoC: codecs: wcd934x: do not switch off SIDO Buck when codec is in use
	dmaengine: imx-sdma: Fix error checking in sdma_event_remap
	dmaengine: mediatek:Fix PM usage reference leak of mtk_uart_apdma_alloc_chan_resources
	spi: spi-mtk-nor: initialize spi controller after resume
	esp: limit skb_page_frag_refill use to a single page
	igc: Fix infinite loop in release_swfw_sync
	igc: Fix BUG: scheduling while atomic
	rxrpc: Restore removed timer deletion
	net/smc: Fix sock leak when release after smc_shutdown()
	net/packet: fix packet_sock xmit return value checking
	ip6_gre: Avoid updating tunnel->tun_hlen in __gre6_xmit()
	ip6_gre: Fix skb_under_panic in __gre6_xmit()
	net/sched: cls_u32: fix possible leak in u32_init_knode()
	l3mdev: l3mdev_master_upper_ifindex_by_index_rcu should be using netdev_master_upper_dev_get_rcu
	ipv6: make ip6_rt_gc_expire an atomic_t
	netlink: reset network and mac headers in netlink_dump()
	net: stmmac: Use readl_poll_timeout_atomic() in atomic state
	dmaengine: idxd: add RO check for wq max_batch_size write
	dmaengine: idxd: add RO check for wq max_transfer_size write
	selftests: mlxsw: vxlan_flooding: Prevent flooding of unwanted packets
	arm64/mm: Remove [PUD|PMD]_TABLE_BIT from [pud|pmd]_bad()
	arm64: mm: fix p?d_leaf()
	ARM: vexpress/spc: Avoid negative array index when !SMP
	reset: tegra-bpmp: Restore Handle errors in BPMP response
	platform/x86: samsung-laptop: Fix an unsigned comparison which can never be negative
	ALSA: usb-audio: Fix undefined behavior due to shift overflowing the constant
	arm64: dts: imx: Fix imx8*-var-som touchscreen property sizes
	vxlan: fix error return code in vxlan_fdb_append
	cifs: Check the IOCB_DIRECT flag, not O_DIRECT
	net: atlantic: Avoid out-of-bounds indexing
	mt76: Fix undefined behavior due to shift overflowing the constant
	brcmfmac: sdio: Fix undefined behavior due to shift overflowing the constant
	dpaa_eth: Fix missing of_node_put in dpaa_get_ts_info()
	drm/msm/mdp5: check the return of kzalloc()
	net: macb: Restart tx only if queue pointer is lagging
	scsi: qedi: Fix failed disconnect handling
	stat: fix inconsistency between struct stat and struct compat_stat
	nvme: add a quirk to disable namespace identifiers
	nvme-pci: disable namespace identifiers for Qemu controllers
	EDAC/synopsys: Read the error count from the correct register
	mm, hugetlb: allow for "high" userspace addresses
	oom_kill.c: futex: delay the OOM reaper to allow time for proper futex cleanup
	mm/mmu_notifier.c: fix race in mmu_interval_notifier_remove()
	ata: pata_marvell: Check the 'bmdma_addr' beforing reading
	dma: at_xdmac: fix a missing check on list iterator
	net: atlantic: invert deep par in pm functions, preventing null derefs
	xtensa: patch_text: Fixup last cpu should be master
	xtensa: fix a7 clobbering in coprocessor context load/store
	openvswitch: fix OOB access in reserve_sfa_size()
	gpio: Request interrupts after IRQ is initialized
	ASoC: soc-dapm: fix two incorrect uses of list iterator
	e1000e: Fix possible overflow in LTR decoding
	ARC: entry: fix syscall_trace_exit argument
	arm_pmu: Validate single/group leader events
	sched/pelt: Fix attach_entity_load_avg() corner case
	perf/core: Fix perf_mmap fail when CONFIG_PERF_USE_VMALLOC enabled
	drm/panel/raspberrypi-touchscreen: Avoid NULL deref if not initialised
	drm/panel/raspberrypi-touchscreen: Initialise the bridge in prepare
	KVM: PPC: Fix TCE handling for VFIO
	drm/vc4: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
	powerpc/perf: Fix power9 event alternatives
	perf report: Set PERF_SAMPLE_DATA_SRC bit for Arm SPE event
	ext4: fix fallocate to use file_modified to update permissions consistently
	ext4: fix symlink file size not match to file content
	ext4: fix use-after-free in ext4_search_dir
	ext4: limit length to bitmap_maxbytes - blocksize in punch_hole
	ext4, doc: fix incorrect h_reserved size
	ext4: fix overhead calculation to account for the reserved gdt blocks
	ext4: force overhead calculation if the s_overhead_cluster makes no sense
	can: isotp: stop timeout monitoring when no first frame was sent
	jbd2: fix a potential race while discarding reserved buffers after an abort
	spi: atmel-quadspi: Fix the buswidth adjustment between spi-mem and controller
	staging: ion: Prevent incorrect reference counting behavour
	block/compat_ioctl: fix range check in BLKGETSIZE
	Revert "net: micrel: fix KS8851_MLL Kconfig"
	Linux 5.10.113

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4ed10699cbb32b89caf79b8b4a2a35b3d8824115
Greg Kroah-Hartman
2022-05-12 11:15:14 +02:00
94 changed files with 550 additions and 389 deletions

@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
       - Checksum of the extended attribute block.
     * - 0x14
       - \_\_u32
-      - h\_reserved[2]
+      - h\_reserved[3]
       - Zero.
 The checksum is calculated against the FS UUID, the 64-bit block number

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 112
+SUBLEVEL = 113
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -199,6 +199,7 @@ tracesys_exit:
 	st  r0, [sp, PT_r0]	; sys call return value in pt_regs
 	;POST	Sys Call Ptrace Hook
+	mov r0, sp		; pt_regs needed
 	bl  @syscall_trace_exit
 	b   ret_from_exception	; NOT ret_from_system_call at is saves r0 which
 				; we'd done before calling post hook above

@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
 		}
 		cluster = topology_physical_package_id(cpu_dev->id);
-		if (init_opp_table[cluster])
+		if (cluster < 0 || init_opp_table[cluster])
 			continue;
 		if (ve_init_opp_table(cpu_dev))

@@ -89,12 +89,12 @@
 	pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <125>;
-	touchscreen-size-x = /bits/ 16 <4008>;
+	touchscreen-size-x = <4008>;
 	ti,y-min = /bits/ 16 <282>;
-	touchscreen-size-y = /bits/ 16 <3864>;
+	touchscreen-size-y = <3864>;
 	ti,x-plate-ohms = /bits/ 16 <180>;
-	touchscreen-max-pressure = /bits/ 16 <255>;
-	touchscreen-average-samples = /bits/ 16 <10>;
+	touchscreen-max-pressure = <255>;
+	touchscreen-average-samples = <10>;
 	ti,debounce-tol = /bits/ 16 <3>;
 	ti,debounce-rep = /bits/ 16 <1>;
 	ti,settle-delay-usec = /bits/ 16 <150>;

@@ -70,12 +70,12 @@
 	pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 	ti,x-min = /bits/ 16 <125>;
-	touchscreen-size-x = /bits/ 16 <4008>;
+	touchscreen-size-x = <4008>;
 	ti,y-min = /bits/ 16 <282>;
-	touchscreen-size-y = /bits/ 16 <3864>;
+	touchscreen-size-y = <3864>;
 	ti,x-plate-ohms = /bits/ 16 <180>;
-	touchscreen-max-pressure = /bits/ 16 <255>;
-	touchscreen-average-samples = /bits/ 16 <10>;
+	touchscreen-max-pressure = <255>;
+	touchscreen-average-samples = <10>;
 	ti,debounce-tol = /bits/ 16 <3>;
 	ti,debounce-rep = /bits/ 16 <1>;
 	ti,settle-delay-usec = /bits/ 16 <150>;

@@ -522,13 +522,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
-#define pmd_leaf(pmd)		pmd_sect(pmd)
+#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
+#define pmd_bad(pmd)		(!pmd_table(pmd))
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
 static inline bool pud_sect(pud_t pud) { return false; }
@@ -619,9 +618,9 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pud_none(pud)		(!pud_val(pud))
-#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud)		(!pud_table(pud))
 #define pud_present(pud)	pte_present(pud_pte(pud))
-#define pud_leaf(pud)		pud_sect(pud)
+#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)		pte_valid(pud_pte(pud))
 static inline void set_pud(pud_t *pudp, pud_t pud)

@@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
 	tbl[idx % TCES_PER_PAGE] = tce;
 }
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
-		unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+		struct iommu_table *tbl, unsigned long entry)
 {
-	unsigned long hpa = 0;
-	enum dma_data_direction dir = DMA_NONE;
+	unsigned long i;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
-	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+	for (i = 0; i < subpages; ++i) {
+		unsigned long hpa = 0;
+		enum dma_data_direction dir = DMA_NONE;
+		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+	}
 }
 static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
 			break;
 	}
+	iommu_tce_kill(tbl, io_entry, subpages);
 	return ret;
 }
@@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
 			break;
 	}
+	iommu_tce_kill(tbl, io_entry, subpages);
 	return ret;
 }
@@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
 					entry, ua, dir);
-		iommu_tce_kill(stit->tbl, entry, 1);
 		if (ret != H_SUCCESS) {
-			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
 			goto unlock_exit;
 		}
 	}
@@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 */
 		if (get_user(tce, tces + i)) {
 			ret = H_TOO_HARD;
-			goto invalidate_exit;
+			goto unlock_exit;
 		}
 		tce = be64_to_cpu(tce);
 		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
 			ret = H_PARAMETER;
-			goto invalidate_exit;
+			goto unlock_exit;
 		}
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 					iommu_tce_direction(tce));
 			if (ret != H_SUCCESS) {
-				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
-						entry);
-				goto invalidate_exit;
+				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+						entry + i);
+				goto unlock_exit;
 			}
 		}
 		kvmppc_tce_put(stt, entry + i, tce);
 	}
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill(stit->tbl, entry, npages);
 unlock_exit:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 				continue;
 			if (ret == H_TOO_HARD)
-				goto invalidate_exit;
+				return ret;
 			WARN_ON_ONCE(1);
-			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
 		}
 	}
 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
 		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
 		tbl->it_ops->tce_kill(tbl, entry, pages, true);
 }
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
-		unsigned long entry)
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
+		struct iommu_table *tbl, unsigned long entry)
 {
-	unsigned long hpa = 0;
-	enum dma_data_direction dir = DMA_NONE;
+	unsigned long i;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
-	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
+	for (i = 0; i < subpages; ++i) {
+		unsigned long hpa = 0;
+		enum dma_data_direction dir = DMA_NONE;
+		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
+	}
 }
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
 			break;
 	}
+	iommu_tce_kill_rm(tbl, io_entry, subpages);
 	return ret;
 }
@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
 			break;
 	}
+	iommu_tce_kill_rm(tbl, io_entry, subpages);
 	return ret;
 }
@@ -424,10 +434,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry, ua, dir);
-		iommu_tce_kill_rm(stit->tbl, entry, 1);
 		if (ret != H_SUCCESS) {
-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
 			return ret;
 		}
 	}
@@ -569,7 +577,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		ua = 0;
 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
 			ret = H_PARAMETER;
-			goto invalidate_exit;
+			goto unlock_exit;
 		}
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -578,19 +586,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 					iommu_tce_direction(tce));
 			if (ret != H_SUCCESS) {
-				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
-						entry);
-				goto invalidate_exit;
+				kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
+						entry + i);
+				goto unlock_exit;
 			}
 		}
 		kvmppc_rm_tce_put(stt, entry + i, tce);
 	}
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill_rm(stit->tbl, entry, npages);
 unlock_exit:
 	if (!prereg)
 		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
@@ -632,20 +636,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 				continue;
 			if (ret == H_TOO_HARD)
-				goto invalidate_exit;
+				return ret;
 			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
 		}
 	}
 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
 		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
 	return ret;
 }

@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
 /* Table of alternatives, sorted by column 0 */
 static const unsigned int power9_event_alternatives[][MAX_ALT] = {
-	{ PM_INST_DISP,			PM_INST_DISP_ALT },
-	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
-	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
-	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
 	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
+	{ PM_INST_DISP,			PM_INST_DISP_ALT },
+	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
+	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
+	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
 };
 static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])

@@ -29,15 +29,13 @@ typedef u32 compat_caddr_t;
 typedef __kernel_fsid_t	compat_fsid_t;
 struct compat_stat {
-	compat_dev_t	st_dev;
-	u16		__pad1;
+	u32		st_dev;
 	compat_ino_t	st_ino;
 	compat_mode_t	st_mode;
 	compat_nlink_t	st_nlink;
 	__compat_uid_t	st_uid;
 	__compat_gid_t	st_gid;
-	compat_dev_t	st_rdev;
-	u16		__pad2;
+	u32		st_rdev;
 	u32		st_size;
 	u32		st_blksize;
 	u32		st_blocks;

@@ -29,7 +29,7 @@
 	.if XTENSA_HAVE_COPROCESSOR(x);				\
 		.align 4;						\
 	.Lsave_cp_regs_cp##x:						\
-		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
+		xchal_cp##x##_store a2 a3 a4 a5 a6;			\
 		jx	a0;						\
 	.endif
@@ -46,7 +46,7 @@
 	.if XTENSA_HAVE_COPROCESSOR(x);				\
 		.align 4;						\
 	.Lload_cp_regs_cp##x:						\
-		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
+		xchal_cp##x##_load a2 a3 a4 a5 a6;			\
 		jx	a0;						\
 	.endif

@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
 {
 	struct patch *patch = data;
-	if (atomic_inc_return(&patch->cpu_count) == 1) {
+	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
 		local_patch_text(patch->addr, patch->data, patch->sz);
 		atomic_inc(&patch->cpu_count);
 	} else {

@@ -679,7 +679,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			(bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKGETSIZE:
 		size = i_size_read(bdev->bd_inode);
-		if ((size >> 9) > ~0UL)
+		if ((size >> 9) > ~(compat_ulong_t)0)
 			return -EFBIG;
 		return compat_put_ulong(argp, size >> 9);

@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
 	switch(ap->port_no)
 	{
 	case 0:
+		if (!ap->ioaddr.bmdma_addr)
+			return ATA_CBL_PATA_UNK;
 		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
 			return ATA_CBL_PATA40;
 		return ATA_CBL_PATA80;

@@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_desc	*desc, *_desc, *iter;
 	struct list_head	*descs_list;
 	enum dma_status		ret;
 	int			residue, retry;
@@ -1505,11 +1505,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 * microblock.
 	 */
 	descs_list = &desc->descs_list;
-	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+			desc = iter;
 			break;
+		}
 	}
 	residue += cur_ubc << dwidth;

@@ -1098,6 +1098,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
 	u64 xfer_size;
 	int rc;
+	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+		return -EPERM;
 	if (wq->state != IDXD_WQ_DISABLED)
 		return -EPERM;
@@ -1132,6 +1135,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
 	u64 batch_size;
 	int rc;
+	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+		return -EPERM;
 	if (wq->state != IDXD_WQ_DISABLED)
 		return -EPERM;

@@ -1789,7 +1789,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
 	u32 reg, val, shift, num_map, i;
 	int ret = 0;
-	if (IS_ERR(np) || IS_ERR(gpr_np))
+	if (IS_ERR(np) || !gpr_np)
 		goto out;
 	event_remap = of_find_property(np, propname, NULL);
@@ -1837,7 +1837,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
 	}
 out:
-	if (!IS_ERR(gpr_np))
+	if (gpr_np)
 		of_node_put(gpr_np);
 	return ret;

@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
 	unsigned int status;
 	int ret;
-	ret = pm_runtime_get_sync(mtkd->ddev.dev);
+	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
 	if (ret < 0) {
 		pm_runtime_put_noidle(chan->device->dev);
 		return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
 	ret = readx_poll_timeout(readl, c->base + VFF_EN,
 			      status, !status, 10, 100);
 	if (ret)
-		return ret;
+		goto err_pm;
 	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
 			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
 	if (ret < 0) {
 		dev_err(chan->device->dev, "Can't request dma IRQ\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_pm;
 	}
 	if (mtkd->support_33bits)
 		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+err_pm:
+	pm_runtime_put_noidle(mtkd->ddev.dev);
 	return ret;
 }

@@ -163,6 +163,11 @@
 #define ECC_STAT_CECNT_SHIFT		8
 #define ECC_STAT_BITNUM_MASK		0x7F
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT		16
+#define ECC_ERRCNT_CECNT_MASK		0xFFFF
 /* DDR QOS Interrupt register definitions */
 #define DDR_QOS_IRQ_STAT_OFST		0x20200
 #define DDR_QOSUE_MASK			0x4
@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
 	base = priv->baseaddr;
 	p = &priv->stat;
+	regval = readl(base + ECC_ERRCNT_OFST);
+	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+	if (!p->ce_cnt)
+		goto ue_err;
 	regval = readl(base + ECC_STAT_OFST);
 	if (!regval)
 		return 1;
-	p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
-	p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
-	if (!p->ce_cnt)
-		goto ue_err;
 	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
 	regval = readl(base + ECC_CEADDR0_OFST);

Some files were not shown because too many files have changed in this diff.