Merge 5.10.68 into android12-5.10-lts

Changes in 5.10.68
	drm/bridge: lt9611: Fix handling of 4k panels
	btrfs: fix upper limit for max_inline for page size 64K
	io_uring: ensure symmetry in handling iter types in loop_rw_iter()
	xen: reset legacy rtc flag for PV domU
	bnx2x: Fix enabling network interfaces without VFs
	arm64/sve: Use correct size when reinitialising SVE state
	PM: base: power: don't try to use non-existing RTC for storing data
	PCI: Add AMD GPU multi-function power dependencies
	drm/amd/amdgpu: Increase HWIP_MAX_INSTANCE to 10
	drm/etnaviv: return context from etnaviv_iommu_context_get
	drm/etnaviv: put submit prev MMU context when it exists
	drm/etnaviv: stop abusing mmu_context as FE running marker
	drm/etnaviv: keep MMU context across runtime suspend/resume
	drm/etnaviv: exec and MMU state is lost when resetting the GPU
	drm/etnaviv: fix MMU context leak on GPU reset
	drm/etnaviv: reference MMU context when setting up hardware state
	drm/etnaviv: add missing MMU context put when reaping MMU mapping
	s390/sclp: fix Secure-IPL facility detection
	x86/pat: Pass valid address to sanitize_phys()
	x86/mm: Fix kern_addr_valid() to cope with existing but not present entries
	tipc: fix an use-after-free issue in tipc_recvmsg
	ethtool: Fix rxnfc copy to user buffer overflow
	net/{mlx5|nfp|bnxt}: Remove unnecessary RTNL lock assert
	net-caif: avoid user-triggerable WARN_ON(1)
	ptp: dp83640: don't define PAGE0
	dccp: don't duplicate ccid when cloning dccp sock
	net/l2tp: Fix reference count leak in l2tp_udp_recv_core
	r6040: Restore MDIO clock frequency after MAC reset
	tipc: increase timeout in tipc_sk_enqueue()
	drm/rockchip: cdn-dp-core: Make cdn_dp_core_resume __maybe_unused
	perf machine: Initialize srcline string member in add_location struct
	net/mlx5: FWTrace, cancel work on alloc pd error flow
	net/mlx5: Fix potential sleeping in atomic context
	nvme-tcp: fix io_work priority inversion
	events: Reuse value read using READ_ONCE instead of re-reading it
	net: ipa: initialize all filter table slots
	gen_compile_commands: fix missing 'sys' package
	vhost_net: fix OoB on sendmsg() failure.
	net/af_unix: fix a data-race in unix_dgram_poll
	net: dsa: destroy the phylink instance on any error in dsa_slave_phy_setup
	x86/uaccess: Fix 32-bit __get_user_asm_u64() when CC_HAS_ASM_GOTO_OUTPUT=y
	tcp: fix tp->undo_retrans accounting in tcp_sacktag_one()
	selftest: net: fix typo in altname test
	qed: Handle management FW error
	udp_tunnel: Fix udp_tunnel_nic work-queue type
	dt-bindings: arm: Fix Toradex compatible typo
	ibmvnic: check failover_pending in login response
	KVM: PPC: Book3S HV: Tolerate treclaim. in fake-suspend mode changing registers
	bnxt_en: make bnxt_free_skbs() safe to call after bnxt_free_mem()
	net: hns3: pad the short tunnel frame before sending to hardware
	net: hns3: change affinity_mask to numa node range
	net: hns3: disable mac in flr process
	net: hns3: fix the timing issue of VF clearing interrupt sources
	mm/memory_hotplug: use "unsigned long" for PFN in zone_for_pfn_range()
	dt-bindings: mtd: gpmc: Fix the ECC bytes vs. OOB bytes equation
	mfd: db8500-prcmu: Adjust map to reality
	PCI: Add ACS quirks for NXP LX2xx0 and LX2xx2 platforms
	fuse: fix use after free in fuse_read_interrupt()
	PCI: tegra194: Fix handling BME_CHGED event
	PCI: tegra194: Fix MSI-X programming
	PCI: tegra: Fix OF node reference leak
	mfd: Don't use irq_create_mapping() to resolve a mapping
	PCI: rcar: Fix runtime PM imbalance in rcar_pcie_ep_probe()
	tracing/probes: Reject events which have the same name of existing one
	PCI: cadence: Use bitfield for *quirk_retrain_flag* instead of bool
	PCI: cadence: Add quirk flag to set minimum delay in LTSSM Detect.Quiet state
	PCI: j721e: Add PCIe support for J7200
	PCI: j721e: Add PCIe support for AM64
	PCI: Add ACS quirks for Cavium multi-function devices
	watchdog: Start watchdog in watchdog_set_last_hw_keepalive only if appropriate
	octeontx2-af: Add additional register check to rvu_poll_reg()
	Set fc_nlinfo in nh_create_ipv4, nh_create_ipv6
	net: usb: cdc_mbim: avoid altsetting toggling for Telit LN920
	block, bfq: honor already-setup queue merges
	PCI: ibmphp: Fix double unmap of io_mem
	ethtool: Fix an error code in cxgb2.c
	NTB: Fix an error code in ntb_msit_probe()
	NTB: perf: Fix an error code in perf_setup_inbuf()
	s390/bpf: Fix optimizing out zero-extensions
	s390/bpf: Fix 64-bit subtraction of the -0x80000000 constant
	s390/bpf: Fix branch shortening during codegen pass
	mfd: axp20x: Update AXP288 volatile ranges
	backlight: ktd253: Stabilize backlight
	PCI: of: Don't fail devm_pci_alloc_host_bridge() on missing 'ranges'
	PCI: iproc: Fix BCMA probe resource handling
	netfilter: Fix fall-through warnings for Clang
	netfilter: nft_ct: protect nft_ct_pcpu_template_refcnt with mutex
	KVM: arm64: Restrict IPA size to maximum 48 bits on 4K and 16K page size
	PCI: Fix pci_dev_str_match_path() alloc while atomic bug
	mfd: tqmx86: Clear GPIO IRQ resource when no IRQ is set
	tracing/boot: Fix a hist trigger dependency for boot time tracing
	mtd: mtdconcat: Judge callback existence based on the master
	mtd: mtdconcat: Check _read, _write callbacks existence before assignment
	KVM: arm64: Fix read-side race on updates to vcpu reset state
	KVM: arm64: Handle PSCI resets before userspace touches vCPU state
	PCI: Sync __pci_register_driver() stub for CONFIG_PCI=n
	mtd: rawnand: cafe: Fix a resource leak in the error handling path of 'cafe_nand_probe()'
	ARC: export clear_user_page() for modules
	perf unwind: Do not overwrite FEATURE_CHECK_LDFLAGS-libunwind-{x86,aarch64}
	perf bench inject-buildid: Handle writen() errors
	gpio: mpc8xxx: Fix a resources leak in the error handling path of 'mpc8xxx_probe()'
	gpio: mpc8xxx: Use 'devm_gpiochip_add_data()' to simplify the code and avoid a leak
	net: dsa: tag_rtl4_a: Fix egress tags
	selftests: mptcp: clean tmp files in simult_flows
	net: hso: add failure handler for add_net_device
	net: dsa: b53: Fix calculating number of switch ports
	net: dsa: b53: Set correct number of ports in the DSA struct
	netfilter: socket: icmp6: fix use-after-scope
	fq_codel: reject silly quantum parameters
	qlcnic: Remove redundant unlock in qlcnic_pinit_from_rom
	ip_gre: validate csum_start only on pull
	net: dsa: b53: Fix IMP port setup on BCM5301x
	bnxt_en: fix stored FW_PSID version masks
	bnxt_en: Fix asic.rev in devlink dev info command
	bnxt_en: log firmware debug notifications
	bnxt_en: Consolidate firmware reset event logging.
	bnxt_en: Convert to use netif_level() helpers.
	bnxt_en: Improve logging of error recovery settings information.
	bnxt_en: Fix possible unintended driver initiated error recovery
	mfd: lpc_sch: Partially revert "Add support for Intel Quark X1000"
	mfd: lpc_sch: Rename GPIOBASE to prevent build error
	net: renesas: sh_eth: Fix freeing wrong tx descriptor
	x86/mce: Avoid infinite loop for copy from user recovery
	bnxt_en: Fix error recovery regression
	net: dsa: bcm_sf2: Fix array overrun in bcm_sf2_num_active_ports()
	Linux 5.10.68

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I542f48f8de516dcabce91d3d399583483aba0da7
Greg Kroah-Hartman, 2021-09-30 18:35:35 +02:00
124 changed files with 903 additions and 418 deletions


@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30


@@ -122,7 +122,7 @@ on various other factors also like;
 	so the device should have enough free bytes available its OOB/Spare
 	area to accommodate ECC for entire page. In general following expression
 	helps in determining if given device can accommodate ECC syndrome:
-	"2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
+	"2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
 	where
 		OOBSIZE		number of bytes in OOB/spare area
 		PAGESIZE	number of bytes in main-area of device page


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 67
+SUBLEVEL = 68
 EXTRAVERSION =
 NAME = Dare mighty things


@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
 	clear_page(to);
 	clear_bit(PG_dc_clean, &page->flags);
 }
-
+EXPORT_SYMBOL(clear_user_page);
 
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall


@@ -510,7 +510,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(current));
+		memset(task->thread.sve_state, 0, sve_state_size(task));
 		return;
 	}


@@ -1186,6 +1186,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			break;
 
+		/*
+		 * We could owe a reset due to PSCI. Handle the pending reset
+		 * here to ensure userspace register accesses are ordered after
+		 * the reset.
+		 */
+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_reset_vcpu(vcpu);
+
 		if (ioctl == KVM_SET_ONE_REG)
 			r = kvm_arm_set_reg(vcpu, &reg);
 		else


@@ -206,10 +206,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_reset_state reset_state;
 	int ret;
 	bool loaded;
 	u32 pstate;
 
+	mutex_lock(&vcpu->kvm->lock);
+	reset_state = vcpu->arch.reset_state;
+	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	mutex_unlock(&vcpu->kvm->lock);
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);
 
@@ -272,8 +278,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	 * Additional reset state handling that PSCI may have imposed on us.
 	 * Must be done after all the sys_reg reset.
 	 */
-	if (vcpu->arch.reset_state.reset) {
-		unsigned long target_pc = vcpu->arch.reset_state.pc;
+	if (reset_state.reset) {
+		unsigned long target_pc = reset_state.pc;
 
 		/* Gracefully handle Thumb2 entry point */
 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@@ -282,13 +288,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}
 
 		/* Propagate caller endianness */
-		if (vcpu->arch.reset_state.be)
+		if (reset_state.be)
 			kvm_vcpu_set_be(vcpu);
 
 		*vcpu_pc(vcpu) = target_pc;
-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
-		vcpu->arch.reset_state.reset = false;
+		vcpu_set_reg(vcpu, 0, reset_state.r0);
 	}
 
 	/* Reset timer */
@@ -313,6 +317,14 @@ int kvm_set_ipa_limit(void)
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
 				ID_AA64MMFR0_PARANGE_SHIFT);
+	/*
+	 * IPA size beyond 48 bits could not be supported
+	 * on either 4K or 16K page size. Hence let's cap
+	 * it to 48 bits, in case it's reported as larger
+	 * on the system.
+	 */
+	if (PAGE_SIZE != SZ_64K)
+		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
 
 	/*
 	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at


@@ -3146,7 +3146,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	/* The following code handles the fake_suspend = 1 case */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -PPC_MIN_STKFRM(r1)
+	stdu	r1, -TM_FRAME_SIZE(r1)
 
 	/* Turn on TM. */
 	mfmsr	r8
@@ -3161,10 +3161,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	nop
 
+	/*
+	 * It's possible that treclaim. may modify registers, if we have lost
+	 * track of fake-suspend state in the guest due to it using rfscv.
+	 * Save and restore registers in case this occurs.
+	 */
+	mfspr	r3, SPRN_DSCR
+	mfspr	r4, SPRN_XER
+	mfspr	r5, SPRN_AMR
+	/* SPRN_TAR would need to be saved here if the kernel ever used it */
+	mfcr	r12
+	SAVE_NVGPRS(r1)
+	SAVE_GPR(2, r1)
+	SAVE_GPR(3, r1)
+	SAVE_GPR(4, r1)
+	SAVE_GPR(5, r1)
+	stw	r12, 8(r1)
+	std	r1, HSTATE_HOST_R1(r13)
+
 	/* We have to treclaim here because that's the only way to do S->N */
 	li	r3, TM_CAUSE_KVM_RESCHED
 	TRECLAIM(R3)
 
+	GET_PACA(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	REST_GPR(2, r1)
+	REST_GPR(3, r1)
+	REST_GPR(4, r1)
+	REST_GPR(5, r1)
+	lwz	r12, 8(r1)
+	REST_NVGPRS(r1)
+	mtspr	SPRN_DSCR, r3
+	mtspr	SPRN_XER, r4
+	mtspr	SPRN_AMR, r5
+	mtcr	r12
+	HMT_MEDIUM
+
 	/*
 	 * We were in fake suspend, so we are not going to save the
 	 * register state as the guest checkpointed state (since
@@ -3192,7 +3224,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	std	r5, VCPU_TFHAR(r9)
 	std	r6, VCPU_TFIAR(r9)
 
-	addi	r1, r1, PPC_MIN_STKFRM
+	addi	r1, r1, TM_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
 	mtlr	r0
 	blr


@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)	\
 ({							\
 	/* Branch instruction needs 6 bytes */		\
-	int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;	\
 	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
 	REG_SET_SEEN(b1);				\
 	REG_SET_SEEN(b2);				\
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9080000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
-		if (!imm)
-			break;
-		/* alfi %dst,imm */
-		EMIT6_IMM(0xc20b0000, dst_reg, imm);
+		if (imm != 0) {
+			/* alfi %dst,imm */
+			EMIT6_IMM(0xc20b0000, dst_reg, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9090000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
-		if (!imm)
-			break;
-		/* alfi %dst,-imm */
-		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+		if (imm != 0) {
+			/* alfi %dst,-imm */
+			EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 		if (!imm)
 			break;
-		/* agfi %dst,-imm */
-		EMIT6_IMM(0xc2080000, dst_reg, -imm);
+		if (imm == -0x80000000) {
+			/* algfi %dst,0x80000000 */
+			EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+		} else {
+			/* agfi %dst,-imm */
+			EMIT6_IMM(0xc2080000, dst_reg, -imm);
+		}
 		break;
 	/*
 	 * BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb90c0000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
-		if (imm == 1)
-			break;
-		/* msfi %r5,imm */
-		EMIT6_IMM(0xc2010000, dst_reg, imm);
+		if (imm != 1) {
+			/* msfi %r5,imm */
+			EMIT6_IMM(0xc2010000, dst_reg, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 			if (BPF_OP(insn->code) == BPF_MOD)
 				/* lhgi %dst,0 */
 				EMIT4_IMM(0xa7090000, dst_reg, 0);
+			else
+				EMIT_ZERO(dst_reg);
 			break;
 		}
 		/* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9820000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
-		if (!imm)
-			break;
-		/* xilf %dst,imm */
-		EMIT6_IMM(0xc0070000, dst_reg, imm);
+		if (imm != 0) {
+			/* xilf %dst,imm */
+			EMIT6_IMM(0xc0070000, dst_reg, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
 		break;
	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
-		if (imm == 0)
-			break;
-		/* sll %dst,imm(%r0) */
-		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+		if (imm != 0) {
+			/* sll %dst,imm(%r0) */
+			EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
-		if (imm == 0)
-			break;
-		/* srl %dst,imm(%r0) */
-		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+		if (imm != 0) {
+			/* srl %dst,imm(%r0) */
+			EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
 		break;
 	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
-		if (imm == 0)
-			break;
-		/* sra %dst,imm(%r0) */
-		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+		if (imm != 0) {
+			/* sra %dst,imm(%r0) */
+			EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */


@@ -301,8 +301,8 @@ do { \
 	unsigned int __gu_low, __gu_high;			\
 	const unsigned int __user *__gu_ptr;			\
 	__gu_ptr = (const void __user *)(ptr);			\
-	__get_user_asm(__gu_low, ptr, "l", "=r", label);	\
-	__get_user_asm(__gu_high, ptr+1, "l", "=r", label);	\
+	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);	\
+	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
 	(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
 } while (0)
 #else


@@ -1241,6 +1241,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 
 static void kill_me_now(struct callback_head *ch)
 {
+	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+	p->mce_count = 0;
 	force_sig(SIGBUS);
 }
 
@@ -1249,6 +1252,7 @@ static void kill_me_maybe(struct callback_head *cb)
 	struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
 	int flags = MF_ACTION_REQUIRED;
 
+	p->mce_count = 0;
 	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
 	if (!p->mce_ripv)
@@ -1269,17 +1273,34 @@ static void kill_me_maybe(struct callback_head *cb)
 	}
 }
 
-static void queue_task_work(struct mce *m, int kill_it)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-	current->mce_addr = m->addr;
-	current->mce_kflags = m->kflags;
-	current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-	current->mce_whole_page = whole_page(m);
+	int count = ++current->mce_count;
 
-	if (kill_it)
-		current->mce_kill_me.func = kill_me_now;
-	else
-		current->mce_kill_me.func = kill_me_maybe;
+	/* First call, save all the details */
+	if (count == 1) {
+		current->mce_addr = m->addr;
+		current->mce_kflags = m->kflags;
+		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+		current->mce_whole_page = whole_page(m);
+
+		if (kill_current_task)
+			current->mce_kill_me.func = kill_me_now;
+		else
+			current->mce_kill_me.func = kill_me_maybe;
+	}
+
+	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
+	if (count > 10)
+		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+	/* Second or later call, make sure page address matches the one from first call */
+	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+		mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+	/* Do not call task_work_add() more than once */
+	if (count > 1)
+		return;
 
 	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1427,7 +1448,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-		queue_task_work(&m, kill_it);
+		queue_task_work(&m, msg, kill_it);
 
 	} else {
 		/*
@@ -1445,7 +1466,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		}
 
 		if (m.kflags & MCE_IN_KERNEL_COPYIN)
-			queue_task_work(&m, kill_it);
+			queue_task_work(&m, msg, kill_it);
 	}
 out:
 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);


@@ -1389,18 +1389,18 @@ int kern_addr_valid(unsigned long addr)
 		return 0;
 
 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
+	if (!p4d_present(*p4d))
 		return 0;
 
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+	if (!pud_present(*pud))
 		return 0;
 
 	if (pud_large(*pud))
 		return pfn_valid(pud_pfn(*pud));
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		return 0;
 
 	if (pmd_large(*pmd))


@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 	int err = 0;
 
 	start = sanitize_phys(start);
-	end = sanitize_phys(end);
+
+	/*
+	 * The end address passed into this function is exclusive, but
+	 * sanitize_phys() expects an inclusive address.
+	 */
+	end = sanitize_phys(end - 1) + 1;
 	if (start >= end) {
 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
 			start, end - 1, cattr_name(req_type));


@@ -1204,6 +1204,11 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+	x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1356,6 +1361,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 			add_preferred_console("xenboot", 0, NULL);
 		if (pci_xen)
 			x86_init.pci.arch_init = pci_xen_init;
+		x86_platform.set_legacy_features =
+				xen_domu_set_legacy_features;
 	} else {
 		const struct dom0_vga_console_info *info =
 			(void *)((char *)xen_start_info +


@@ -2526,6 +2526,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
+	/*
+	 * The above assignment schedules the following redirections:
+	 * each time some I/O for bfqq arrives, the process that
+	 * generated that I/O is disassociated from bfqq and
+	 * associated with new_bfqq. Here we increases new_bfqq->ref
+	 * in advance, adding the number of processes that are
+	 * expected to be associated with new_bfqq as they happen to
+	 * issue I/O.
+	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
@@ -2585,6 +2594,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
+	/* if a merge has already been setup, then proceed with that first */
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	/*
 	 * Do not perform queue merging if the device is non
 	 * rotational and performs internal queueing. In fact, such a
@@ -2639,9 +2652,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;


@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 	const char *file = *(const char **)(tracedata + 2);
 	unsigned int user_hash_value, file_hash_value;
 
+	if (!x86_platform.legacy.rtc)
+		return;
+
 	user_hash_value = user % USERHASH;
 	file_hash_value = hash_string(lineno, file, FILEHASH);
 	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	hash_value_early_read = read_magic_time();
 	register_pm_notifier(&pm_trace_nb);
 	return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
 	unsigned int val = hash_value_early_read;
 	unsigned int user, file, dev;
 
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	user = val % USERHASH;
 	val = val / USERHASH;
 	file = val % FILEHASH;


@@ -374,7 +374,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 	    of_device_is_compatible(np, "fsl,ls1088a-gpio"))
 		gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
 
-	ret = gpiochip_add_data(gc, mpc8xxx_gc);
+	ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
 	if (ret) {
 		pr_err("%pOF: GPIO chip registration failed with status %d\n",
 		       np, ret);
@@ -406,6 +406,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
 
 	return 0;
 err:
+	if (mpc8xxx_gc->irq)
+		irq_domain_remove(mpc8xxx_gc->irq);
 	iounmap(mpc8xxx_gc->regs);
 	return ret;
 }
@@ -419,7 +421,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
 		irq_domain_remove(mpc8xxx_gc->irq);
 	}
 
-	gpiochip_remove(&mpc8xxx_gc->gc);
 	iounmap(mpc8xxx_gc->regs);
 
 	return 0;


@@ -717,7 +717,7 @@ enum amd_hw_ip_block_type {
 	MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE	8
+#define HWIP_MAX_INSTANCE	10
 
 struct amd_powerplay {
 	void *pp_handle;


@@ -867,8 +867,14 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
 						      const struct drm_display_mode *mode)
 {
 	struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
+	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
 
-	return lt9611_mode ? MODE_OK : MODE_BAD;
+	if (!lt9611_mode)
+		return MODE_BAD;
+	else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
+		return MODE_PANEL;
+	else
+		return MODE_OK;
 }
 
 static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)


@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	if (switch_mmu_context) {
 		struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-		etnaviv_iommu_context_get(mmu_context);
-		gpu->mmu_context = mmu_context;
+		gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
 		etnaviv_iommu_context_put(old_context);
 	}
 

Some files were not shown because too many files have changed in this diff.