You've already forked linux-rockchip
mirror of
https://github.com/armbian/linux-rockchip.git
synced 2026-01-06 11:08:10 -08:00
Changes in 5.10.133
KVM/VMX: Use TEST %REG,%REG instead of CMP $0,%REG in vmenter.S
KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
objtool: Refactor ORC section generation
objtool: Add 'alt_group' struct
objtool: Support stack layout changes in alternatives
objtool: Support retpoline jump detection for vmlinux.o
objtool: Assume only ELF functions do sibling calls
objtool: Combine UNWIND_HINT_RET_OFFSET and UNWIND_HINT_FUNC
x86/xen: Support objtool validation in xen-asm.S
x86/xen: Support objtool vmlinux.o validation in xen-head.S
x86/alternative: Merge include files
x86/alternative: Support not-feature
x86/alternative: Support ALTERNATIVE_TERNARY
x86/alternative: Use ALTERNATIVE_TERNARY() in _static_cpu_has()
x86/insn: Rename insn_decode() to insn_decode_from_regs()
x86/insn: Add a __ignore_sync_check__ marker
x86/insn: Add an insn_decode() API
x86/insn-eval: Handle return values from the decoder
x86/alternative: Use insn_decode()
x86: Add insn_decode_kernel()
x86/alternatives: Optimize optimize_nops()
x86/retpoline: Simplify retpolines
objtool: Correctly handle retpoline thunk calls
objtool: Handle per arch retpoline naming
objtool: Rework the elf_rebuild_reloc_section() logic
objtool: Add elf_create_reloc() helper
objtool: Create reloc sections implicitly
objtool: Extract elf_strtab_concat()
objtool: Extract elf_symbol_add()
objtool: Add elf_create_undef_symbol()
objtool: Keep track of retpoline call sites
objtool: Cache instruction relocs
objtool: Skip magical retpoline .altinstr_replacement
objtool/x86: Rewrite retpoline thunk calls
objtool: Support asm jump tables
x86/alternative: Optimize single-byte NOPs at an arbitrary position
objtool: Fix .symtab_shndx handling for elf_create_undef_symbol()
objtool: Only rewrite unconditional retpoline thunk calls
objtool/x86: Ignore __x86_indirect_alt_* symbols
objtool: Don't make .altinstructions writable
objtool: Teach get_alt_entry() about more relocation types
objtool: print out the symbol type when complaining about it
objtool: Remove reloc symbol type checks in get_alt_entry()
objtool: Make .altinstructions section entry size consistent
objtool: Introduce CFI hash
objtool: Handle __sanitize_cov*() tail calls
objtool: Classify symbols
objtool: Explicitly avoid self modifying code in .altinstr_replacement
objtool,x86: Replace alternatives with .retpoline_sites
x86/retpoline: Remove unused replacement symbols
x86/asm: Fix register order
x86/asm: Fixup odd GEN-for-each-reg.h usage
x86/retpoline: Move the retpoline thunk declarations to nospec-branch.h
x86/retpoline: Create a retpoline thunk array
x86/alternative: Implement .retpoline_sites support
x86/alternative: Handle Jcc __x86_indirect_thunk_\reg
x86/alternative: Try inline spectre_v2=retpoline,amd
x86/alternative: Add debug prints to apply_retpolines()
bpf,x86: Simplify computing label offsets
bpf,x86: Respect X86_FEATURE_RETPOLINE*
x86/lib/atomic64_386_32: Rename things
x86: Prepare asm files for straight-line-speculation
x86: Prepare inline-asm for straight-line-speculation
x86/alternative: Relax text_poke_bp() constraint
objtool: Add straight-line-speculation validation
x86: Add straight-line-speculation mitigation
tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy'
kvm/emulate: Fix SETcc emulation function offsets with SLS
objtool: Default ignore INT3 for unreachable
crypto: x86/poly1305 - Fixup SLS
objtool: Fix SLS validation for kcov tail-call replacement
objtool: Fix code relocs vs weak symbols
objtool: Fix type of reloc::addend
objtool: Fix symbol creation
x86/entry: Remove skip_r11rcx
objtool: Fix objtool regression on x32 systems
x86/realmode: build with -D__DISABLE_EXPORTS
x86/kvm/vmx: Make noinstr clean
x86/cpufeatures: Move RETPOLINE flags to word 11
x86/retpoline: Cleanup some #ifdefery
x86/retpoline: Swizzle retpoline thunk
Makefile: Set retpoline cflags based on CONFIG_CC_IS_{CLANG,GCC}
x86/retpoline: Use -mfunction-return
x86: Undo return-thunk damage
x86,objtool: Create .return_sites
objtool: skip non-text sections when adding return-thunk sites
x86,static_call: Use alternative RET encoding
x86/ftrace: Use alternative RET encoding
x86/bpf: Use alternative RET encoding
x86/kvm: Fix SETcc emulation for return thunks
x86/vsyscall_emu/64: Don't use RET in vsyscall emulation
x86/sev: Avoid using __x86_return_thunk
x86: Use return-thunk in asm code
objtool: Treat .text.__x86.* as noinstr
x86: Add magic AMD return-thunk
x86/bugs: Report AMD retbleed vulnerability
x86/bugs: Add AMD retbleed= boot parameter
x86/bugs: Enable STIBP for JMP2RET
x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
x86/entry: Add kernel IBRS implementation
x86/bugs: Optimize SPEC_CTRL MSR writes
x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
x86/bugs: Report Intel retbleed vulnerability
intel_idle: Disable IBRS during long idle
objtool: Update Retpoline validation
x86/xen: Rename SYS* entry points
x86/bugs: Add retbleed=ibpb
x86/bugs: Do IBPB fallback check only once
objtool: Add entry UNRET validation
x86/cpu/amd: Add Spectral Chicken
x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
x86/speculation: Fix firmware entry SPEC_CTRL handling
x86/speculation: Fix SPEC_CTRL write on SMT state change
x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
x86/speculation: Remove x86_spec_ctrl_mask
objtool: Re-add UNWIND_HINT_{SAVE_RESTORE}
KVM: VMX: Flatten __vmx_vcpu_run()
KVM: VMX: Convert launched argument to flags
KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
KVM: VMX: Fix IBRS handling after vmexit
x86/speculation: Fill RSB on vmexit for IBRS
x86/common: Stamp out the stepping madness
x86/cpu/amd: Enumerate BTC_NO
x86/retbleed: Add fine grained Kconfig knobs
x86/bugs: Add Cannon lake to RETBleed affected CPU list
x86/bugs: Do not enable IBPB-on-entry when IBPB is not supported
x86/kexec: Disable RET on kexec
x86/speculation: Disable RRSBA behavior
x86/static_call: Serialize __static_call_fixup() properly
tools/insn: Restore the relative include paths for cross building
x86, kvm: use proper ASM macros for kvm_vcpu_is_preempted
x86/xen: Fix initialisation in hypercall_page after rethunk
x86/ftrace: Add UNWIND_HINT_FUNC annotation for ftrace_stub
x86/asm/32: Fix ANNOTATE_UNRET_SAFE use on 32-bit
x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
efi/x86: use naked RET on mixed mode call wrapper
x86/kvm: fix FASTOP_SIZE when return thunks are enabled
KVM: emulate: do not adjust size of fastop and setcc subroutines
tools arch x86: Sync the msr-index.h copy with the kernel sources
tools headers cpufeatures: Sync with the kernel sources
x86/bugs: Remove apostrophe typo
um: Add missing apply_returns()
x86: Use -mindirect-branch-cs-prefix for RETPOLINE builds
kvm: fix objtool relocation warning
objtool: Fix elf_create_undef_symbol() endianness
tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy' - again
tools headers: Remove broken definition of __LITTLE_ENDIAN
Linux 5.10.133
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Change-Id: I7e23843058c509562ae3f3a68e0710f31249a087
241 lines
7.9 KiB
C
241 lines
7.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* include/linux/cpu.h - generic cpu definition
|
|
*
|
|
* This is mainly for topological representation. We define the
|
|
* basic 'struct cpu' here, which can be embedded in per-arch
|
|
* definitions of processors.
|
|
*
|
|
* Basic handling of the devices is done in drivers/base/cpu.c
|
|
*
|
|
* CPUs are exported via sysfs in the devices/system/cpu
|
|
* directory.
|
|
*/
|
|
#ifndef _LINUX_CPU_H_
|
|
#define _LINUX_CPU_H_
|
|
|
|
#include <linux/node.h>
|
|
#include <linux/compiler.h>
|
|
#include <linux/cpumask.h>
|
|
#include <linux/cpuhotplug.h>
|
|
|
|
struct device;
|
|
struct device_node;
|
|
struct attribute_group;
|
|
|
|
/*
 * Per-CPU device description.  Embedded in per-arch CPU structures and
 * registered with the driver core (drivers/base/cpu.c) via register_cpu(),
 * which exports it under /sys/devices/system/cpu.
 */
struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;	/* driver-core device backing the sysfs entry */
};
|
|
|
|
/* Early boot-time CPU/trap initialisation hooks. */
extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);

/*
 * Registration and lookup of the per-CPU devices exported via sysfs in
 * the devices/system/cpu directory (handled in drivers/base/cpu.c).
 */
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
/* Arch helpers mapping logical CPUs to physical IDs / DT nodes. */
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);

/* Add/remove a single device attribute on the CPU devices. */
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

/* Add/remove a whole attribute group on the CPU devices. */
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
|
|
|
|
/*
 * sysfs "show" handlers for the hardware vulnerability status files,
 * one per known CPU bug (meltdown, spectre, mds, retbleed, ...).
 * Each returns the formatted status string for the given attribute.
 */
extern ssize_t cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mds(struct device *dev,
			    struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
					struct device_attribute *attr,
					char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
				      struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
					struct device_attribute *attr,
					char *buf);
extern ssize_t cpu_show_retbleed(struct device *dev,
				 struct device_attribute *attr, char *buf);
|
|
|
|
/*
 * Create a CPU-subsystem device with a printf-style name; the format
 * arguments are checked against @fmt via __printf(4, 5).
 */
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
/* Arch-defined sysfs probe/release handlers for manual CPU add/remove. */
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
|
|
|
|
/*
|
|
* These states are not related to the core CPU hotplug mechanism. They are
|
|
* used by various (sub)architectures to track internal state
|
|
*/
|
|
/*
 * NOTE(review): the values are deliberately sparse/non-contiguous —
 * presumably kept stable for the (sub)architectures that still consume
 * them; confirm before renumbering.
 */
#define CPU_ONLINE		0x0002 /* CPU is up */
#define CPU_UP_PREPARE		0x0003 /* CPU coming up */
#define CPU_DEAD		0x0007 /* CPU dead */
#define CPU_DEAD_FROZEN		0x0008 /* CPU timed out on unplug */
#define CPU_POST_DEAD		0x0009 /* CPU successfully unplugged */
#define CPU_BROKEN		0x000B /* CPU did not die properly */
|
|
|
|
#ifdef CONFIG_SMP
|
|
/* Set while tasks are frozen during hotplug — see kernel/cpu.c for semantics. */
extern bool cpuhp_tasks_frozen;
/* Bring a CPU online, by number or via its struct device. */
int add_cpu(unsigned int cpu);
int cpu_device_up(struct device *dev);
/* Notify registered parties that @cpu is about to start. */
void notify_cpu_starting(unsigned int cpu);
/* Serialise updates to the CPU maps (no-ops on !SMP below). */
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
/* Hibernation/resume helpers for (re)starting non-boot CPUs. */
int bringup_hibernate_cpu(unsigned int sleep_cpu);
void bringup_nonboot_cpus(unsigned int setup_max_cpus);
|
|
|
|
#else /* CONFIG_SMP */
|
|
#define cpuhp_tasks_frozen 0
|
|
|
|
/*
 * UP build (CONFIG_SMP=n): there is no CPU map to serialise, so the
 * update-side begin/end pair compiles away to nothing.
 */
static inline void cpu_maps_update_begin(void) { }
static inline void cpu_maps_update_done(void) { }
|
|
|
|
#endif /* CONFIG_SMP */
|
|
/* The driver-core bus all CPU devices hang off. */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Writer side of the CPU hotplug lock (held while plugging/unplugging). */
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
/* Reader side: hold to keep the set of online CPUs stable. */
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
/* Temporarily forbid / re-allow hotplug operations. */
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
/* Take a CPU offline, by number or via its struct device. */
int remove_cpu(unsigned int cpu);
/* Pause/resume the CPUs in @cpumask — NOTE(review): semantics live in kernel/cpu.c; confirm. */
int pause_cpus(struct cpumask *cpumask);
int resume_cpus(struct cpumask *cpumask);
int cpu_device_down(struct device *dev);
/* Offline everything except @primary_cpu (used on shutdown paths). */
extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
|
|
|
|
#else /* CONFIG_HOTPLUG_CPU */
|
|
|
|
/*
 * !CONFIG_HOTPLUG_CPU: CPUs can never appear or disappear, so the
 * hotplug locking API reduces to empty inlines — the trylock trivially
 * succeeds and pausing/resuming CPUs is simply not supported.
 */
static inline void cpus_write_lock(void) {}
static inline void cpus_write_unlock(void) {}
static inline void cpus_read_lock(void) {}
static inline void cpus_read_unlock(void) {}
static inline int cpus_read_trylock(void) { return 1; }
static inline void lockdep_assert_cpus_held(void) {}
static inline void cpu_hotplug_disable(void) {}
static inline void cpu_hotplug_enable(void) {}
static inline int pause_cpus(struct cpumask *cpumask) { return -ENODEV; }
static inline int resume_cpus(struct cpumask *cpumask) { return -ENODEV; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) {}
|
|
#endif /* !CONFIG_HOTPLUG_CPU */
|
|
|
|
/* Wrappers which go away once all code is converted */
|
|
/*
 * Legacy wrappers — kept only until every caller is converted to the
 * cpus_{read,write}_{lock,unlock}() names; each simply forwards.
 */
static inline void cpu_hotplug_begin(void)
{
	cpus_write_lock();
}
static inline void cpu_hotplug_done(void)
{
	cpus_write_unlock();
}
static inline void get_online_cpus(void)
{
	cpus_read_lock();
}
static inline void put_online_cpus(void)
{
	cpus_read_unlock();
}
|
|
|
|
#ifdef CONFIG_PM_SLEEP_SMP
|
|
extern int freeze_secondary_cpus(int primary);
|
|
extern void thaw_secondary_cpus(void);
|
|
|
|
static inline int suspend_disable_secondary_cpus(void)
|
|
{
|
|
int cpu = 0;
|
|
|
|
if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
|
|
cpu = -1;
|
|
|
|
return freeze_secondary_cpus(cpu);
|
|
}
|
|
/*
 * Thaw the secondary CPUs frozen by suspend_disable_secondary_cpus().
 *
 * Fix: the original wrote `return thaw_secondary_cpus();`, returning a
 * void expression from a void function — a constraint violation under
 * C11 6.8.6.4p1 (accepted by GCC only as an extension).  Call and fall
 * off the end instead; behaviour is unchanged.
 */
static inline void suspend_enable_secondary_cpus(void)
{
	thaw_secondary_cpus();
}
|
|
|
|
#else /* !CONFIG_PM_SLEEP_SMP */
|
|
/* !CONFIG_PM_SLEEP_SMP: suspend never touches secondary CPUs — no-ops. */
static inline void thaw_secondary_cpus(void)
{
}
static inline int suspend_disable_secondary_cpus(void)
{
	return 0;	/* nothing was disabled, report success */
}
static inline void suspend_enable_secondary_cpus(void)
{
}
|
|
#endif /* !CONFIG_PM_SLEEP_SMP */
|
|
|
|
/* Enter the idle loop for this CPU, starting from hotplug state @state. */
void cpu_startup_entry(enum cpuhp_state state);

/* Toggle forced idle-loop polling (name-based; see kernel/sched/idle.c). */
void cpu_idle_poll_ctrl(bool enable);

/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle	__section(".cpuidle.text")

/* Is @pc inside cpuidle code (the .cpuidle.text section)? — confirm in caller. */
bool cpu_in_idle(unsigned long pc);

/* Arch hooks invoked by the generic idle loop. */
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

/* Per-CPU bringup state tracking (see the definitions for state values). */
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
/* Idle this CPU for @duration_ns, tolerating up to @latency_ns wakeup latency. */
void play_idle_precise(u64 duration_ns, u64 latency_ns);

/*
 * Convenience wrapper: idle for @duration_us microseconds with no
 * latency constraint (U64_MAX).
 */
static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
/* Wait up to @seconds for @cpu to report death — semantics in kernel/smpboot.c. */
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
/* Without hotplug a CPU can never go idle-dead; nothing to report. */
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
|
|
|
/*
 * SMT (simultaneous multithreading) control state, consumed by the
 * cpu_smt_* / cpuhp_smt_* interfaces below.
 */
enum cpuhp_smt_control {
	CPU_SMT_ENABLED,		/* SMT is enabled */
	CPU_SMT_DISABLED,		/* SMT disabled (re-enable possible) */
	CPU_SMT_FORCE_DISABLED,		/* SMT disabled and forced off */
	CPU_SMT_NOT_SUPPORTED,		/* hardware has no SMT */
	CPU_SMT_NOT_IMPLEMENTED,	/* arch provides no SMT control */
};
|
|
|
|
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
|
|
/* Current SMT control state (kernel/cpu.c). */
extern enum cpuhp_smt_control cpu_smt_control;
/* Disable SMT; @force makes the disable non-revertable. */
extern void cpu_smt_disable(bool force);
/* Re-evaluate SMT state against the detected topology. */
extern void cpu_smt_check_topology(void);
/* Can SMT siblings (still) be brought online? */
extern bool cpu_smt_possible(void);
/* Switch SMT on/off globally; returns 0 or a negative errno. */
extern int cpuhp_smt_enable(void);
extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
|
|
#else
|
|
/*
 * SMT control unavailable (!SMP or !CONFIG_HOTPLUG_SMT): report the
 * state as "not implemented" and accept-and-ignore every request.
 */
# define cpu_smt_control		(CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force)
{
}
static inline void cpu_smt_check_topology(void)
{
}
static inline bool cpu_smt_possible(void)
{
	return 0;
}
static inline int cpuhp_smt_enable(void)
{
	return 0;
}
|
|
/* SMT control not implemented: report success without doing anything. */
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
|
|
#endif
|
|
|
|
/* Query the "mitigations=" boot parameter state (see kernel/cpu.c). */
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);

#endif /* _LINUX_CPU_H_ */
|