Merge 5.10.84 into android12-5.10-lts

Changes in 5.10.84
	NFSv42: Fix pagecache invalidation after COPY/CLONE
	can: j1939: j1939_tp_cmd_recv(): check the dst address of TP.CM_BAM
	ovl: simplify file splice
	ovl: fix deadlock in splice write
	gfs2: release iopen glock early in evict
	gfs2: Fix length of holes reported at end-of-file
	powerpc/pseries/ddw: Revert "Extend upper limit for huge DMA window for persistent memory"
	drm/sun4i: fix unmet dependency on RESET_CONTROLLER for PHY_SUN6I_MIPI_DPHY
	mac80211: do not access the IV when it was stripped
	net/smc: Transfer remaining wait queue entries during fallback
	atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait
	net: return correct error code
	platform/x86: thinkpad_acpi: Add support for dual fan control
	platform/x86: thinkpad_acpi: Fix WWAN device disabled issue after S3 deep
	s390/setup: avoid using memblock_enforce_memory_limit
	btrfs: check-integrity: fix a warning on write caching disabled disk
	thermal: core: Reset previous low and high trip during thermal zone init
	scsi: iscsi: Unblock session then wake up error handler
	drm/amd/amdkfd: Fix kernel panic when reset failed and been triggered again
	drm/amd/amdgpu: fix potential memleak
	ata: ahci: Add Green Sardine vendor ID as board_ahci_mobile
	ethernet: hisilicon: hns: hns_dsaf_misc: fix a possible array overflow in hns_dsaf_ge_srst_by_port()
	ipv6: check return value of ipv6_skip_exthdr
	net: tulip: de4x5: fix the problem that the array 'lp->phy[8]' may be out of bound
	net: ethernet: dec: tulip: de4x5: fix possible array overflows in type3_infoblock()
	perf inject: Fix ARM SPE handling
	perf hist: Fix memory leak of a perf_hpp_fmt
	perf report: Fix memory leaks around perf_tip()
	net/smc: Avoid warning of possible recursive locking
	ACPI: Add stubs for wakeup handler functions
	vrf: Reset IPCB/IP6CB when processing outbound pkts in vrf dev xmit
	kprobes: Limit max data_size of the kretprobe instances
	rt2x00: do not mark device gone on EPROTO errors during start
	ipmi: Move remove_work to dedicated workqueue
	cpufreq: Fix get_cpu_device() failure in add_cpu_dev_symlink()
	s390/pci: move pseudo-MMIO to prevent MIO overlap
	fget: check that the fd still exists after getting a ref to it
	sata_fsl: fix UAF in sata_fsl_port_stop when rmmod sata_fsl
	sata_fsl: fix warning in remove_proc_entry when rmmod sata_fsl
	ipv6: fix memory leak in fib6_rule_suppress
	drm/amd/display: Allow DSC on supported MST branch devices
	KVM: Disallow user memslot with size that exceeds "unsigned long"
	KVM: nVMX: Flush current VPID (L1 vs. L2) for KVM_REQ_TLB_FLUSH_GUEST
	KVM: x86: Use a stable condition around all VT-d PI paths
	KVM: arm64: Avoid setting the upper 32 bits of TCR_EL2 and CPTR_EL2 to 1
	KVM: X86: Use vcpu->arch.walk_mmu for kvm_mmu_invlpg()
	tracing/histograms: String compares should not care about signed values
	wireguard: selftests: increase default dmesg log size
	wireguard: allowedips: add missing __rcu annotation to satisfy sparse
	wireguard: selftests: actually test for routing loops
	wireguard: selftests: rename DEBUG_PI_LIST to DEBUG_PLIST
	wireguard: device: reset peer src endpoint when netns exits
	wireguard: receive: use ring buffer for incoming handshakes
	wireguard: receive: drop handshakes if queue lock is contended
	wireguard: ratelimiter: use kvcalloc() instead of kvzalloc()
	i2c: stm32f7: flush TX FIFO upon transfer errors
	i2c: stm32f7: recover the bus on access timeout
	i2c: stm32f7: stop dma transfer in case of NACK
	i2c: cbus-gpio: set atomic transfer callback
	natsemi: xtensa: fix section mismatch warnings
	tcp: fix page frag corruption on page fault
	net: qlogic: qlcnic: Fix a NULL pointer dereference in qlcnic_83xx_add_rings()
	net: mpls: Fix notifications when deleting a device
	siphash: use _unaligned version by default
	arm64: ftrace: add missing BTIs
	net/mlx4_en: Fix an use-after-free bug in mlx4_en_try_alloc_resources()
	selftests: net: Correct case name
	mt76: mt7915: fix NULL pointer dereference in mt7915_get_phy_mode
	ASoC: tegra: Fix wrong value type in ADMAIF
	ASoC: tegra: Fix wrong value type in I2S
	ASoC: tegra: Fix wrong value type in DMIC
	ASoC: tegra: Fix wrong value type in DSPK
	ASoC: tegra: Fix kcontrol put callback in ADMAIF
	ASoC: tegra: Fix kcontrol put callback in I2S
	ASoC: tegra: Fix kcontrol put callback in DMIC
	ASoC: tegra: Fix kcontrol put callback in DSPK
	ASoC: tegra: Fix kcontrol put callback in AHUB
	rxrpc: Fix rxrpc_peer leak in rxrpc_look_up_bundle()
	rxrpc: Fix rxrpc_local leak in rxrpc_lookup_peer()
	ALSA: intel-dsp-config: add quirk for CML devices based on ES8336 codec
	net: usb: lan78xx: lan78xx_phy_init(): use PHY_POLL instead of "0" if no IRQ is available
	net: marvell: mvpp2: Fix the computation of shared CPUs
	dpaa2-eth: destroy workqueue at the end of remove function
	net: annotate data-races on txq->xmit_lock_owner
	ipv4: convert fib_num_tclassid_users to atomic_t
	net/smc: fix wrong list_del in smc_lgr_cleanup_early
	net/rds: correct socket tunable error in rds_tcp_tune()
	net/smc: Keep smc_close_final rc during active close
	drm/msm/a6xx: Allocate enough space for GMU registers
	drm/msm: Do hw_init() before capturing GPU state
	atlantic: Increase delay for fw transactions
	atlatnic: enable Nbase-t speeds with base-t
	atlantic: Fix to display FW bundle version instead of FW mac version.
	atlantic: Add missing DIDs and fix 115c.
	Remove Half duplex mode speed capabilities.
	atlantic: Fix statistics logic for production hardware
	atlantic: Remove warn trace message.
	KVM: x86/pmu: Fix reserved bits for AMD PerfEvtSeln register
	KVM: VMX: Set failure code in prepare_vmcs02()
	x86/sev: Fix SEV-ES INS/OUTS instructions for word, dword, and qword
	x86/entry: Use the correct fence macro after swapgs in kernel CR3
	x86/xen: Add xenpv_restore_regs_and_return_to_usermode()
	sched/uclamp: Fix rq->uclamp_max not set on first enqueue
	x86/pv: Switch SWAPGS to ALTERNATIVE
	x86/entry: Add a fence for kernel entry SWAPGS in paranoid_entry()
	parisc: Fix KBUILD_IMAGE for self-extracting kernel
	parisc: Fix "make install" on newer debian releases
	vgacon: Propagate console boot parameters before calling `vc_resize'
	xhci: Fix commad ring abort, write all 64 bits to CRCR register.
	USB: NO_LPM quirk Lenovo Powered USB-C Travel Hub
	usb: typec: tcpm: Wait in SNK_DEBOUNCED until disconnect
	x86/tsc: Add a timer to make sure TSC_adjust is always checked
	x86/tsc: Disable clocksource watchdog for TSC on qualified platorms
	x86/64/mm: Map all kernel memory into trampoline_pgd
	tty: serial: msm_serial: Deactivate RX DMA for polling support
	serial: pl011: Add ACPI SBSA UART match id
	serial: tegra: Change lower tolerance baud rate limit for tegra20 and tegra30
	serial: core: fix transmit-buffer reset and memleak
	serial: 8250_pci: Fix ACCES entries in pci_serial_quirks array
	serial: 8250_pci: rewrite pericom_do_set_divisor()
	serial: 8250: Fix RTS modem control while in rs485 mode
	iwlwifi: mvm: retry init flow if failed
	parisc: Mark cr16 CPU clocksource unstable on all SMP machines
	net/tls: Fix authentication failure in CCM mode
	ipmi: msghandler: Make symbol 'remove_work_wq' static
	Linux 5.10.84

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iad592da28c6425dea7dca35b229d14c44edb412d
Greg Kroah-Hartman committed on 2021-12-08 09:41:05 +01:00
141 changed files with 1740 additions and 696 deletions

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 83
+SUBLEVEL = 84
 EXTRAVERSION =
 NAME = Dare mighty things


@@ -84,7 +84,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI (1 << 20)
 #define TCR_EL2_PS_SHIFT 16
 #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
@@ -269,7 +269,7 @@
 #define CPTR_EL2_TFP_SHIFT 10
 
 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
 #define CPTR_EL2_TAM (1 << 30)
 #define CPTR_EL2_TTA (1 << 20)
 #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)


@@ -77,11 +77,17 @@
 .endm
 
 SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
 	ftrace_regs_entry 1
 	b ftrace_common
 SYM_CODE_END(ftrace_regs_caller)
 
 SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+	BTI_C
+#endif
 	ftrace_regs_entry 0
 	b ftrace_common
 SYM_CODE_END(ftrace_caller)


@@ -17,7 +17,12 @@
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #
 
+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif
 
 NM = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS += -D__hppa__=1


@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi
 
 # Default install


@@ -252,27 +252,13 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 	/*
-	 * The cr16 interval timers are not syncronized across CPUs on
-	 * different sockets, so mark them unstable and lower rating on
-	 * multi-socket SMP systems.
+	 * The cr16 interval timers are not syncronized across CPUs, even if
+	 * they share the same socket.
 	 */
 	if (num_online_cpus() > 1 && !running_on_qemu) {
-		int cpu;
-		unsigned long cpu0_loc;
-		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-		for_each_online_cpu(cpu) {
-			if (cpu == 0)
-				continue;
-			if ((cpu0_loc != 0) &&
-			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-				continue;
-
-			clocksource_cr16.name = "cr16_unstable";
-			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-			clocksource_cr16.rating = 0;
-			break;
-		}
+		clocksource_cr16.name = "cr16_unstable";
+		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+		clocksource_cr16.rating = 0;
 	}
 
 	/* XXX: We may want to mark sched_clock stable here if cr16 clocks are


@@ -1034,15 +1034,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
 	phys_addr_t max_addr = memory_hotplug_max();
 	struct device_node *memory;
 
-	/*
-	 * The "ibm,pmemory" can appear anywhere in the address space.
-	 * Assuming it is still backed by page structs, set the upper limit
-	 * for the huge DMA window as MAX_PHYSMEM_BITS.
-	 */
-	if (of_find_node_by_type(NULL, "ibm,pmemory"))
-		return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
-			(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);
-
 	for_each_node_by_type(memory, "memory") {
 		unsigned long start, size;
 		int n_mem_addr_cells, n_mem_size_cells, len;


@@ -14,12 +14,13 @@
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT 48
-#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT 62
+#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES \
-	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+	(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK \
-	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+	((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
 
 struct zpci_iomap_entry {
 	u32 fh;


@@ -845,9 +845,6 @@ static void __init setup_memory(void)
 		storage_key_init_range(start, end);
 
 	psw_set_key(PAGE_DEFAULT_KEY);
-
-	/* Only cosmetics */
-	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
 /*


@@ -575,6 +575,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 	ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+	ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
 	POP_REGS pop_rdi=0
 
 	/*
@@ -669,7 +673,7 @@
 	 */
 	pushq	%rdi			/* Stash user RDI */
-	SWAPGS				/* to kernel GS */
+	swapgs				/* to kernel GS */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */
 
 	movq	PER_CPU_VAR(espfix_waddr), %rdi
@@ -699,7 +703,7 @@
 	orq	PER_CPU_VAR(espfix_stack), %rax
 
 	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
-	SWAPGS				/* to user GS */
+	swapgs				/* to user GS */
 	popq	%rdi			/* Restore user RDI */
 
 	movq	%rax, %rsp
@@ -932,6 +936,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 .Lparanoid_entry_checkgs:
 	/* EBX = 1 -> kernel GSBASE active, no restore required */
 	movl	$1, %ebx
+
 	/*
 	 * The kernel-enforced convention is a negative GSBASE indicates
 	 * a kernel value. No SWAPGS needed on entry and exit.
@@ -939,21 +944,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 	movl	$MSR_GS_BASE, %ecx
 	rdmsr
 	testl	%edx, %edx
-	jns	.Lparanoid_entry_swapgs
-	ret
-
-.Lparanoid_entry_swapgs:
-	SWAPGS
-
-	/*
-	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-	 * unconditional CR3 write, even in the PTI case. So do an lfence
-	 * to prevent GS speculation, regardless of whether PTI is enabled.
-	 */
-	FENCE_SWAPGS_KERNEL_ENTRY
+	js	.Lparanoid_kernel_gsbase
 
 	/* EBX = 0 -> SWAPGS required on exit */
 	xorl	%ebx, %ebx
+	swapgs
+
+.Lparanoid_kernel_gsbase:
+	FENCE_SWAPGS_KERNEL_ENTRY
 	ret
 SYM_CODE_END(paranoid_entry)
@@ -1001,7 +999,7 @@ SYM_CODE_START_LOCAL(paranoid_exit)
 	jnz	restore_regs_and_return_to_kernel
 
 	/* We are returning to a context with user GSBASE */
-	SWAPGS_UNSAFE_STACK
+	swapgs
 	jmp	restore_regs_and_return_to_kernel
 SYM_CODE_END(paranoid_exit)
@@ -1035,11 +1033,6 @@ SYM_CODE_START_LOCAL(error_entry)
 	pushq	%r12
 	ret
 
-.Lerror_entry_done_lfence:
-	FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-	ret
-
 /*
  * There are two places in the kernel that can potentially fault with
  * usergs. Handle them here. B stepping K8s sometimes report a
@@ -1062,8 +1055,14 @@ SYM_CODE_START_LOCAL(error_entry)
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
 	SWAPGS
-	FENCE_SWAPGS_USER_ENTRY
-	jmp	.Lerror_entry_done
+
+	/*
+	 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+	 * kernel or user gsbase.
+	 */
+.Lerror_entry_done_lfence:
+	FENCE_SWAPGS_KERNEL_ENTRY
+	ret
 
 .Lbstep_iret:
 	/* Fix truncated RIP */
@@ -1426,7 +1425,7 @@ nmi_no_fsgsbase:
 	jnz	nmi_restore
 
 nmi_swapgs:
-	SWAPGS_UNSAFE_STACK
+	swapgs
 nmi_restore:
 	POP_REGS


@@ -131,18 +131,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 #define SAVE_FLAGS(x) pushfq; popq %rax
 #endif
 
-#define SWAPGS swapgs
-/*
- * Currently paravirt can't handle swapgs nicely when we
- * don't have a stack we can rely on (such as a user space
- * stack). So we either find a way around these or just fault
- * and emulate if a guest tries to call swapgs directly.
- *
- * Either way, this is a good way to document that we don't
- * have a reliable stack. x86_64 only.
- */
-#define SWAPGS_UNSAFE_STACK swapgs
-
 #define INTERRUPT_RETURN jmp native_iret
 #define USERGS_SYSRET64 \
 	swapgs; \
@@ -170,6 +158,14 @@ static __always_inline int arch_irqs_disabled(void)
 	return arch_irqs_disabled_flags(flags);
 }
+#else
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_XEN_PV
+#define SWAPGS ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
+#else
+#define SWAPGS swapgs
+#endif
+#endif
 #endif /* !__ASSEMBLY__ */
 #endif


@@ -776,26 +776,6 @@ extern void default_banner(void);
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT_XXL
 
-/*
- * If swapgs is used while the userspace stack is still current,
- * there's no way to call a pvop. The PV replacement *must* be
- * inlined, or the swapgs instruction must be trapped and emulated.
- */
-#define SWAPGS_UNSAFE_STACK \
-	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)
-
-/*
- * Note: swapgs is very special, and in practise is either going to be
- * implemented with a single "swapgs" instruction or something very
- * special. Either way, we don't need to save any registers for
- * it.
- */
-#define SWAPGS \
-	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), \
-		  ANNOTATE_RETPOLINE_SAFE; \
-		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs); \
-		 )
-
 #define USERGS_SYSRET64 \
 	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \
 		  ANNOTATE_RETPOLINE_SAFE; \


@@ -169,8 +169,6 @@ struct pv_cpu_ops {
 	   frame set up. */
 	void (*iret)(void);
 
-	void (*swapgs)(void);
-
 	void (*start_context_switch)(struct task_struct *prev);
 	void (*end_context_switch)(struct task_struct *next);
 #endif


@@ -15,7 +15,6 @@ int main(void)
 #ifdef CONFIG_PARAVIRT_XXL
 	OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
 	       cpu.usergs_sysret64);
-	OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
 #ifdef CONFIG_DEBUG_ENTRY
 	OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
 #endif


@@ -312,7 +312,6 @@ struct paravirt_patch_template pv_ops = {
 	.cpu.usergs_sysret64 = native_usergs_sysret64,
 	.cpu.iret = native_iret,
-	.cpu.swapgs = native_swapgs,
 
 #ifdef CONFIG_X86_IOPL_IOPERM
 	.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,


@@ -28,7 +28,6 @@ struct patch_xxl {
 	const unsigned char irq_restore_fl[2];
 	const unsigned char cpu_wbinvd[2];
 	const unsigned char cpu_usergs_sysret64[6];
-	const unsigned char cpu_swapgs[3];
 	const unsigned char mov64[3];
 };
@@ -43,7 +42,6 @@ static const struct patch_xxl patch_data_xxl = {
 	.cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
 	.cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8,
 				 0x48, 0x0f, 0x07 }, // swapgs; sysretq
-	.cpu_swapgs = { 0x0f, 0x01, 0xf8 }, // swapgs
 	.mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
 };
@@ -86,7 +84,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
 	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
 
 	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
-	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
 	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
 #endif


@@ -260,11 +260,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 				   char *dst, char *buf, size_t size)
 {
 	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
-	char __user *target = (char __user *)dst;
-	u64 d8;
-	u32 d4;
-	u16 d2;
-	u8 d1;
 
 	/*
 	 * This function uses __put_user() independent of whether kernel or user
@@ -286,26 +281,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 	 * instructions here would cause infinite nesting.
 	 */
 	switch (size) {
-	case 1:
+	case 1: {
+		u8 d1;
+		u8 __user *target = (u8 __user *)dst;
+
 		memcpy(&d1, buf, 1);
 		if (__put_user(d1, target))
 			goto fault;
 		break;
-	case 2:
+	}
+	case 2: {
+		u16 d2;
+		u16 __user *target = (u16 __user *)dst;
+
 		memcpy(&d2, buf, 2);
 		if (__put_user(d2, target))
 			goto fault;
 		break;
-	case 4:
+	}
+	case 4: {
+		u32 d4;
+		u32 __user *target = (u32 __user *)dst;
+
 		memcpy(&d4, buf, 4);
 		if (__put_user(d4, target))
 			goto fault;
 		break;
-	case 8:
+	}
+	case 8: {
+		u64 d8;
+		u64 __user *target = (u64 __user *)dst;
+
 		memcpy(&d8, buf, 8);
 		if (__put_user(d8, target))
 			goto fault;
 		break;
+	}
 	default:
 		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 		return ES_UNSUPPORTED;
@@ -328,11 +339,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 				  char *src, char *buf, size_t size)
 {
 	unsigned long error_code = X86_PF_PROT;
-	char __user *s = (char __user *)src;
-	u64 d8;
-	u32 d4;
-	u16 d2;
-	u8 d1;
 
 	/*
 	 * This function uses __get_user() independent of whether kernel or user
@@ -354,26 +360,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 	 * instructions here would cause infinite nesting.
 	 */
 	switch (size) {
-	case 1:
+	case 1: {
+		u8 d1;
+		u8 __user *s = (u8 __user *)src;
+
 		if (__get_user(d1, s))
 			goto fault;
 		memcpy(buf, &d1, 1);
 		break;
-	case 2:
+	}
+	case 2: {
+		u16 d2;
+		u16 __user *s = (u16 __user *)src;
+
 		if (__get_user(d2, s))
 			goto fault;
 		memcpy(buf, &d2, 2);
 		break;
-	case 4:
+	}
+	case 4: {
+		u32 d4;
+		u32 __user *s = (u32 __user *)src;
+
 		if (__get_user(d4, s))
 			goto fault;
 		memcpy(buf, &d4, 4);
 		break;
-	case 8:
+	}
+	case 8: {
+		u64 d8;
+		u64 __user *s = (u64 __user *)src;
+
 		if (__get_user(d8, s))
 			goto fault;
 		memcpy(buf, &d8, 8);
 		break;
+	}
 	default:
 		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 		return ES_UNSUPPORTED;


@@ -1178,6 +1178,12 @@ void mark_tsc_unstable(char *reason)
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1194,6 +1200,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		tsc_clocksource_reliable = 1;
+
+	/*
+	 * Disable the clocksource watchdog when the system has:
+	 *  - TSC running at constant frequency
+	 *  - TSC which does not stop in C-States
+	 *  - the TSC_ADJUST register which allows to detect even minimal
+	 *    modifications
+	 *  - not more than two sockets. As the number of sockets cannot be
+	 *    evaluated at the early boot stage where this has to be
+	 *    invoked, check the number of online memory nodes as a
+	 *    fallback solution which is an reasonable estimate.
+	 */
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+	    nr_online_nodes <= 2)
+		tsc_disable_clocksource_watchdog();
 }
 
 /*
@@ -1385,9 +1408,6 @@ static int __init init_tsc_clocksource(void)
 	if (tsc_unstable)
 		goto unreg;
 
-	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1525,7 +1545,7 @@ void __init tsc_init(void)
 	}
 
 	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+		tsc_disable_clocksource_watchdog();
 
 	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
 	detect_art();


@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
 	}
 }
 
+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL	(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all onlined CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 				   unsigned int cpu, bool bootcpu)
 {


@@ -5152,7 +5152,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

(Some files were not shown because too many files have changed in this diff.)