Merge 5.10.155 into android12-5.10-lts
Changes in 5.10.155
fuse: fix readdir cache race
hwspinlock: qcom: correct MMIO max register for newer SoCs
phy: stm32: fix an error code in probe
wifi: cfg80211: silence a sparse RCU warning
wifi: cfg80211: fix memory leak in query_regdb_file()
bpf, sockmap: Fix the sk->sk_forward_alloc warning of sk_stream_kill_queues
bpftool: Fix NULL pointer dereference when pin {PROG, MAP, LINK} without FILE
HID: hyperv: fix possible memory leak in mousevsc_probe()
bpf: Support for pointers beyond pkt_end.
bpf: Add helper macro bpf_for_each_reg_in_vstate
bpf: Fix wrong reg type conversion in release_reference()
net: gso: fix panic on frag_list with mixed head alloc types
macsec: delete new rxsc when offload fails
macsec: fix secy->n_rx_sc accounting
macsec: fix detection of RXSCs when toggling offloading
macsec: clear encryption keys from the stack after setting up offload
net: tun: Fix memory leaks of napi_get_frags
bnxt_en: Fix possible crash in bnxt_hwrm_set_coal()
bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer
net: fman: Unregister ethernet device on removal
capabilities: fix undefined behavior in bit shift for CAP_TO_MASK
KVM: s390x: fix SCK locking
KVM: s390: pv: don't allow userspace to set the clock under PV
net: lapbether: fix issue of dev reference count leakage in lapbeth_device_event()
hamradio: fix issue of dev reference count leakage in bpq_device_event()
drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register()
tcp: prohibit TCP_REPAIR_OPTIONS if data was already sent
ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network
can: af_can: fix NULL pointer dereference in can_rx_register()
net: stmmac: dwmac-meson8b: fix meson8b_devm_clk_prepare_enable()
net: broadcom: Fix BCMGENET Kconfig
tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header
dmaengine: pxa_dma: use platform_get_irq_optional
dmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove()
drivers: net: xgene: disable napi when register irq failed in xgene_enet_open()
perf stat: Fix printing os->prefix in CSV metrics output
net: marvell: prestera: fix memory leak in prestera_rxtx_switch_init()
net: nixge: disable napi when enable interrupts failed in nixge_open()
net/mlx5: Allow async trigger completion execution on single CPU systems
net/mlx5e: E-Switch, Fix comparing termination table instance
net: cpsw: disable napi in cpsw_ndo_open()
net: cxgb3_main: disable napi when bind qsets failed in cxgb_up()
cxgb4vf: shut down the adapter when t4vf_update_port_info() failed in cxgb4vf_open()
net: phy: mscc: macsec: clear encryption keys when freeing a flow
net: atlantic: macsec: clear encryption keys from the stack
ethernet: s2io: disable napi when start nic failed in s2io_card_up()
net: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open()
ethernet: tundra: free irq when alloc ring failed in tsi108_open()
net: macvlan: fix memory leaks of macvlan_common_newlink
riscv: process: fix kernel info leakage
riscv: vdso: fix build with llvm
riscv: Enable CMA support
riscv: Separate memory init from paging init
riscv: fix reserved memory setup
arm64: efi: Fix handling of misaligned runtime regions and drop warning
MIPS: jump_label: Fix compat branch range check
mmc: cqhci: Provide helper for resetting both SDHCI and CQHCI
mmc: sdhci-of-arasan: Fix SDHCI_RESET_ALL for CQHCI
mmc: sdhci_am654: Fix SDHCI_RESET_ALL for CQHCI
mmc: sdhci-tegra: Fix SDHCI_RESET_ALL for CQHCI
ALSA: hda/hdmi - enable runtime pm for more AMD display audio
ALSA: hda/ca0132: add quirk for EVGA Z390 DARK
ALSA: hda: fix potential memleak in 'add_widget_node'
ALSA: hda/realtek: Add Positivo C6300 model quirk
ALSA: usb-audio: Add quirk entry for M-Audio Micro
ALSA: usb-audio: Add DSD support for Accuphase DAC-60
vmlinux.lds.h: Fix placement of '.data..decrypted' section
ata: libata-scsi: fix SYNCHRONIZE CACHE (16) command failure
nilfs2: fix deadlock in nilfs_count_free_blocks()
nilfs2: fix use-after-free bug of ns_writer on remount
drm/i915/dmabuf: fix sg_table handling in map_dma_buf
platform/x86: hp_wmi: Fix rfkill causing soft blocked wifi
btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
mms: sdhci-esdhc-imx: Fix SDHCI_RESET_ALL for CQHCI
udf: Fix a slab-out-of-bounds write bug in udf_find_entry()
mm/memremap.c: map FS_DAX device memory as decrypted
can: j1939: j1939_send_one(): fix missing CAN header initialization
cert host tools: Stop complaining about deprecated OpenSSL functions
dmaengine: at_hdmac: Fix at_lli struct definition
dmaengine: at_hdmac: Don't start transactions at tx_submit level
dmaengine: at_hdmac: Start transfer for cyclic channels in issue_pending
dmaengine: at_hdmac: Fix premature completion of desc in issue_pending
dmaengine: at_hdmac: Do not call the complete callback on device_terminate_all
dmaengine: at_hdmac: Protect atchan->status with the channel lock
dmaengine: at_hdmac: Fix concurrency problems by removing atc_complete_all()
dmaengine: at_hdmac: Fix concurrency over descriptor
dmaengine: at_hdmac: Free the memset buf without holding the chan lock
dmaengine: at_hdmac: Fix concurrency over the active list
dmaengine: at_hdmac: Fix descriptor handling when issuing it to hardware
dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors
dmaengine: at_hdmac: Don't allow CPU to reorder channel enable
dmaengine: at_hdmac: Fix impossible condition
dmaengine: at_hdmac: Check return code of dma_async_device_register
net: tun: call napi_schedule_prep() to ensure we own a napi
mmc: sdhci-esdhc-imx: Convert the driver to DT-only
x86/cpu: Restore AMD's DE_CFG MSR after resume
io_uring: kill goto error handling in io_sqpoll_wait_sq()
Linux 5.10.155
Change-Id: Id7d803ed2db044ef465aab7e80fca8b4b07df258
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT).
:Parameters: address of a buffer in user space to store the data (u8) to
:Returns: -EFAULT if the given address is not accessible from kernel space;
-EINVAL if setting the TOD clock extension to != 0 is not supported
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)

3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
-----------------------------------
@@ -224,6 +225,7 @@ the POP (u64).

:Parameters: address of a buffer in user space to store the data (u64) to
:Returns: -EFAULT if the given address is not accessible from kernel space
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)

3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
-----------------------------------
@@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0.
(kvm_s390_vm_tod_clock) to
:Returns: -EFAULT if the given address is not accessible from kernel space;
-EINVAL if setting the TOD clock extension to != 0 is not supported
-EOPNOTSUPP for a PV guest (TOD managed by the ultravisor)

4. GROUP: KVM_S390_VM_CRYPTO
============================

Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 154
SUBLEVEL = 155
EXTRAVERSION =
NAME = Dare mighty things

@@ -12,6 +12,14 @@

#include <asm/efi.h>

static bool region_is_misaligned(const efi_memory_desc_t *md)
{
if (PAGE_SIZE == EFI_PAGE_SIZE)
return false;
return !PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}

/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;

if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
if (region_is_misaligned(md)) {
static bool __initdata code_is_misaligned;

/*
* If the region is not aligned to the page size of the OS, we
* can not use strict permissions, since that would also affect
* the mapping attributes of the adjacent regions.
* Regions that are not aligned to the OS page size cannot be
* mapped with strict permissions, as those might interfere
* with the permissions that are needed by the adjacent
* region's mapping. However, if we haven't encountered any
* misaligned runtime code regions so far, we can safely use
* non-executable permissions for non-code regions.
*/
return pgprot_val(PAGE_KERNEL_EXEC);
code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);

return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
: pgprot_val(PAGE_KERNEL);
}

/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -62,19 +78,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);

if (!PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
/*
* If the end address of this region is not aligned to page
* size, the mapping is rounded up, and may end up sharing a
* page frame with the next UEFI memory region. If we create
* a block entry now, we may need to split it again when mapping
* the next region, and support for that is going to be removed
* from the MMU routines. So avoid block mappings altogether in
* that case.
*/
/*
* If this region is not aligned to the page size used by the OS, the
* mapping will be rounded outwards, and may end up sharing a page
* frame with an adjacent runtime memory region. Given that the page
* table descriptor covering the shared page will be rewritten when the
* adjacent region gets mapped, we must avoid block mappings here so we
* don't have to worry about splitting them when that happens.
*/
if (region_is_misaligned(md))
page_mappings_only = true;
}

create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -101,6 +114,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);

if (region_is_misaligned(md))
return 0;

/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called

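The region_is_misaligned() helper above only matters when the kernel's page size is larger than the 4 KiB EFI page size (for example 64 KiB pages). Below is a minimal userspace sketch of the same alignment test; the OS_PAGE_SIZE/EFI_PAGE_SIZE constants are assumed values standing in for the kernel's macros and are not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration: 64 KiB kernel pages, 4 KiB EFI pages. */
#define OS_PAGE_SIZE   0x10000ULL
#define EFI_PAGE_SIZE  0x1000ULL
#define EFI_PAGE_SHIFT 12

static bool is_aligned(uint64_t x, uint64_t a)
{
	return (x & (a - 1)) == 0;
}

/* Mirrors the shape of region_is_misaligned(): a region is a problem only
 * when the OS page size differs from the EFI page size and either its start
 * address or its byte length is not a multiple of the OS page size. */
static bool region_is_misaligned(uint64_t phys_addr, uint64_t num_efi_pages)
{
	if (OS_PAGE_SIZE == EFI_PAGE_SIZE)
		return false;
	return !is_aligned(phys_addr, OS_PAGE_SIZE) ||
	       !is_aligned(num_efi_pages << EFI_PAGE_SHIFT, OS_PAGE_SIZE);
}

int main(void)
{
	/* 4 KiB-aligned but not 64 KiB-aligned start address: misaligned. */
	printf("%d\n", region_is_misaligned(0x40001000ULL, 16)); /* prints 1 */
	/* 64 KiB-aligned start and a 64 KiB-multiple size: fine. */
	printf("%d\n", region_is_misaligned(0x40000000ULL, 16)); /* prints 0 */
	return 0;
}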
@@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e,
* The branch offset must fit in the instruction's 26
* bit field.
*/
WARN_ON((offset >= BIT(25)) ||
WARN_ON((offset >= (long)BIT(25)) ||
(offset < -(long)BIT(25)));

insn.j_format.opcode = bc6_op;

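The one-line change above matters because offset is a signed long while BIT(25) expands to an unsigned constant; comparing a negative offset against an unsigned bound promotes the offset to unsigned and defeats the range check. A small standalone illustration of that promotion (BIT() is redefined locally for the demo and is not kernel code):

#include <stdio.h>

#define BIT(n) (1UL << (n))	/* unsigned, as in the kernel header */

int main(void)
{
	long offset = -4;	/* a valid backward branch offset */

	/* Unsigned comparison: -4 is converted to a huge unsigned value,
	 * so the "out of range" test fires even though it should not. */
	printf("unsigned compare: %d\n", offset >= BIT(25));        /* 1 */

	/* Casting the bound back to long keeps the comparison signed,
	 * which is what the fix does. */
	printf("signed compare:   %d\n", offset >= (long)BIT(25));  /* 0 */
	return 0;
}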
@@ -470,6 +470,7 @@ extern void *dtb_early_va;
extern uintptr_t dtb_early_pa;
void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);

#define FIRST_USER_ADDRESS 0

@@ -111,6 +111,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
{
struct pt_regs *childregs = task_pt_regs(p);

memset(&p->thread.s, 0, sizeof(p->thread.s));

/* p->thread holds context to be restored by __switch_to() */
if (unlikely(p->flags & PF_KTHREAD)) {
/* Kernel thread */

@@ -96,6 +96,8 @@ void __init setup_arch(char **cmdline_p)
else
pr_err("No DTB found in kernel mappings\n");
#endif
early_init_fdt_scan_reserved_mem();
misc_mem_init();

#ifdef CONFIG_SWIOTLB
swiotlb_init(1);

@@ -30,7 +30,7 @@ obj-y += vdso.o vdso-syms.o
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

# Disable -pg to prevent insert call site
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)

# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n

@@ -13,6 +13,7 @@
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -41,13 +42,14 @@ struct pt_alloc_ops {
#endif
};

static phys_addr_t dma32_phys_limit __ro_after_init;

static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
(unsigned long) PFN_PHYS(max_low_pfn)));
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

@@ -193,6 +195,7 @@ void __init setup_bootmem(void)

max_pfn = PFN_DOWN(dram_end);
max_low_pfn = max_pfn;
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
@@ -205,7 +208,7 @@ void __init setup_bootmem(void)
*/
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
memblock_allow_resize();
memblock_dump_all();
}
@@ -665,8 +668,12 @@ static void __init resource_init(void)
void __init paging_init(void)
{
setup_vm_final();
sparse_init();
setup_zero_page();
}

void __init misc_mem_init(void)
{
sparse_init();
zone_sizes_init();
resource_init();
}

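For reference, dma32_phys_limit above is just "top of low memory, capped at 4 GiB", computed once in setup_bootmem() and then reused both for the ZONE_DMA32 boundary and for dma_contiguous_reserve(). A tiny sketch of that arithmetic; the PFN helpers here are stand-ins that assume a 4 KiB page size.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define SZ_1G      0x40000000ULL

static uint64_t pfn_phys(uint64_t pfn)  { return pfn << PAGE_SHIFT; }
static uint64_t pfn_down(uint64_t addr) { return addr >> PAGE_SHIFT; }

int main(void)
{
	uint64_t max_low_pfn = pfn_down(0x200000000ULL);	/* 8 GiB of RAM */
	uint64_t dma32_phys_limit;

	/* Same shape as the kernel code: min(4 GiB, end of low memory). */
	dma32_phys_limit = pfn_phys(max_low_pfn);
	if (dma32_phys_limit > 4 * SZ_1G)
		dma32_phys_limit = 4 * SZ_1G;

	printf("ZONE_DMA32 ends at PFN 0x%llx (phys 0x%llx)\n",
	       (unsigned long long)pfn_down(dma32_phys_limit),
	       (unsigned long long)dma32_phys_limit);
	return 0;
}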
@@ -1092,6 +1092,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
struct kvm_s390_vm_tod_clock gtod;
@@ -1101,7 +1103,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)

if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
return -EINVAL;
kvm_s390_set_tod_clock(kvm, &gtod);
__kvm_s390_set_tod_clock(kvm, &gtod);

VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
gtod.epoch_idx, gtod.tod);
@@ -1132,7 +1134,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
sizeof(gtod.tod)))
return -EFAULT;

kvm_s390_set_tod_clock(kvm, &gtod);
__kvm_s390_set_tod_clock(kvm, &gtod);
VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
return 0;
}
@@ -1144,6 +1146,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
if (attr->flags)
return -EINVAL;

mutex_lock(&kvm->lock);
/*
* For protected guests, the TOD is managed by the ultravisor, so trying
* to change it will never bring the expected results.
*/
if (kvm_s390_pv_is_protected(kvm)) {
ret = -EOPNOTSUPP;
goto out_unlock;
}

switch (attr->attr) {
case KVM_S390_VM_TOD_EXT:
ret = kvm_s390_set_tod_ext(kvm, attr);
@@ -1158,6 +1170,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
ret = -ENXIO;
break;
}

out_unlock:
mutex_unlock(&kvm->lock);
return ret;
}

@@ -3862,14 +3877,12 @@ retry:
return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm,
const struct kvm_s390_vm_tod_clock *gtod)
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
struct kvm_vcpu *vcpu;
struct kvm_s390_tod_clock_ext htod;
int i;

mutex_lock(&kvm->lock);
preempt_disable();

get_tod_clock_ext((char *)&htod);
@@ -3890,7 +3903,15 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,

kvm_s390_vcpu_unblock_all(kvm);
preempt_enable();
}

int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
if (!mutex_trylock(&kvm->lock))
return 0;
__kvm_s390_set_tod_clock(kvm, gtod);
mutex_unlock(&kvm->lock);
return 1;
}

/**

@@ -326,8 +326,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm,
const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);

@@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
return kvm_s390_inject_prog_cond(vcpu, rc);

VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
/*
* To set the TOD clock the kvm lock must be taken, but the vcpu lock
* is already held in handle_set_clock. The usual lock order is the
* opposite. As SCK is deprecated and should not be used in several
* cases, for example when the multiple epoch facility or TOD clock
* steering facility is installed (see Principles of Operation), a
* slow path can be used. If the lock can not be taken via try_lock,
* the instruction will be retried via -EAGAIN at a later point in
* time.
*/
if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
kvm_s390_retry_instr(vcpu);
return -EAGAIN;
}

kvm_s390_set_psw_cc(vcpu, 0);
return 0;

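The comment in handle_set_clock() above describes a classic lock-ordering workaround: kvm->lock is normally taken before a vcpu lock, but SCK emulation arrives with the vcpu lock already held, so the code only try-locks and lets the guest retry the instruction if that fails. A minimal userspace analogue of the same pattern, using POSIX threads; the function names here are made up for the sketch.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the update happened, 0 if the caller must retry later --
 * the same contract as kvm_s390_try_set_tod_clock() in the diff above. */
static int try_update_clock(long long *clock, long long value)
{
	if (pthread_mutex_trylock(&big_lock) != 0)
		return 0;		/* would risk deadlock if we blocked here */
	*clock = value;
	pthread_mutex_unlock(&big_lock);
	return 1;
}

int main(void)
{
	long long tod = 0;

	while (!try_update_clock(&tod, 42))
		;	/* in the kernel this is "retry the instruction" */
	printf("tod = %lld\n", tod);
	return 0;
}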
@@ -489,6 +489,11 @@
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022

#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)

#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -565,9 +570,6 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
#define MSR_F10H_DECFG 0xc0011029
#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)

/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a

@@ -822,8 +822,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG 0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
/*
@@ -1018,8 +1016,8 @@ static void init_amd(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
msr_set_bit(MSR_F10H_DECFG,
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
msr_set_bit(MSR_AMD64_DE_CFG,
MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

@@ -342,8 +342,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
msr_set_bit(MSR_F10H_DECFG,
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
msr_set_bit(MSR_AMD64_DE_CFG,
MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

@@ -2475,9 +2475,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data = 0;

switch (msr->index) {
case MSR_F10H_DECFG:
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
case MSR_AMD64_DE_CFG:
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
case MSR_IA32_PERF_CAPABILITIES:
return 0;
@@ -2584,7 +2584,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0x1E;
}
break;
case MSR_F10H_DECFG:
case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
default:
@@ -2764,7 +2764,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
case MSR_F10H_DECFG: {
case MSR_AMD64_DE_CFG: {
struct kvm_msr_entry msr_entry;

msr_entry.index = msr->index;

@@ -1362,7 +1362,7 @@ static const u32 msr_based_features_all[] = {
MSR_IA32_VMX_EPT_VPID_CAP,
MSR_IA32_VMX_VMFUNC,

MSR_F10H_DECFG,
MSR_AMD64_DE_CFG,
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_PERF_CAPABILITIES,

@@ -535,6 +535,7 @@ static void pm_save_spec_msr(void)
MSR_TSX_FORCE_ABORT,
MSR_IA32_MCU_OPT_CTRL,
MSR_AMD64_LS_CFG,
MSR_AMD64_DE_CFG,
};

msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));

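The MSR_F10H_DECFG to MSR_AMD64_DE_CFG change above is only a rename; the register is still MSR 0xC0011029, and bit 1 is the "LFENCE is serializing" switch that the last hunk now saves and restores across suspend. A rough userspace sketch that reads that bit through the msr driver; it assumes an AMD CPU, root privileges, and a loaded msr module, and is not part of the patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_DE_CFG 0xC0011029u

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr interface reads 8 bytes at the offset given by the MSR index. */
	if (pread(fd, &val, sizeof(val), MSR_AMD64_DE_CFG) != sizeof(val)) {
		perror("rdmsr");
		close(fd);
		return 1;
	}
	printf("DE_CFG = 0x%016llx, LFENCE serializing: %s\n",
	       (unsigned long long)val, (val & (1ULL << 1)) ? "yes" : "no");
	close(fd);
	return 0;
}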
@@ -3303,6 +3303,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
case REPORT_LUNS:
case REQUEST_SENSE:
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
@@ -3969,6 +3970,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
return ata_scsi_write_same_xlat;

case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (ata_try_flush_cache(dev))
return ata_scsi_flush_xlat;
break;
@@ -4215,6 +4217,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
* turning this into a no-op.
*/
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
fallthrough;

/* no-op's, complete with success */

@@ -237,6 +237,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
/* Don't allow CPU to reorder channel enable. */
wmb();
dma_writel(atdma, CHER, atchan->mask);

vdbg_dump_regs(atchan);
@@ -297,7 +299,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
u32 ctrla, dscr, trials;
u32 ctrla, dscr;
unsigned int i;

/*
* If the cookie doesn't match to the currently running transfer then
@@ -367,7 +370,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;

rmb(); /* ensure DSCR is read after CTRLA */
@@ -393,7 +396,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;

/* for the first descriptor we can be more accurate */
@@ -443,18 +446,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);

/* If the transfer was a memset, free our temporary buffer */
if (desc->memset_buffer) {
dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
desc->memset_paddr);
desc->memset_buffer = false;
}

/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);

spin_unlock_irqrestore(&atchan->lock, flags);

dma_descriptor_unmap(txd);
@@ -464,42 +455,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
dmaengine_desc_get_callback_invoke(txd, NULL);

dma_run_dependencies(txd);
}

/**
* atc_complete_all - finish work for all transactions
* @atchan: channel to complete transactions for
*
* Eventually submit queued descriptors if any
*
* Assume channel is idle while calling this function
* Called with atchan->lock held and bh disabled
*/
static void atc_complete_all(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
LIST_HEAD(list);
unsigned long flags;

dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

spin_lock_irqsave(&atchan->lock, flags);

/*
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
*/
if (!list_empty(&atchan->queue))
atc_dostart(atchan, atc_first_queued(atchan));
/* empty active_list now it is completed */
list_splice_init(&atchan->active_list, &list);
/* empty queue list by moving descriptors (if any) to active_list */
list_splice_init(&atchan->queue, &atchan->active_list);

/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
/* add myself to free_list */
list_add(&desc->desc_node, &atchan->free_list);
spin_unlock_irqrestore(&atchan->lock, flags);

list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
/* If the transfer was a memset, free our temporary buffer */
if (desc->memset_buffer) {
dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
desc->memset_paddr);
desc->memset_buffer = false;
}
}

/**
@@ -508,26 +477,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
*/
static void atc_advance_work(struct at_dma_chan *atchan)
{
struct at_desc *desc;
unsigned long flags;
int ret;

dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

spin_lock_irqsave(&atchan->lock, flags);
ret = atc_chan_is_enabled(atchan);
if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
return spin_unlock_irqrestore(&atchan->lock, flags);

desc = atc_first_active(atchan);
/* Remove the transfer node from the active list. */
list_del_init(&desc->desc_node);
spin_unlock_irqrestore(&atchan->lock, flags);
if (ret)
return;

if (list_empty(&atchan->active_list) ||
list_is_singular(&atchan->active_list))
return atc_complete_all(atchan);

atc_chain_complete(atchan, atc_first_active(atchan));
atc_chain_complete(atchan, desc);

/* advance work */
spin_lock_irqsave(&atchan->lock, flags);
atc_dostart(atchan, atc_first_active(atchan));
if (!list_empty(&atchan->active_list)) {
desc = atc_first_queued(atchan);
list_move_tail(&desc->desc_node, &atchan->active_list);
atc_dostart(atchan, desc);
}
spin_unlock_irqrestore(&atchan->lock, flags);
}

@@ -539,6 +510,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
static void atc_handle_error(struct at_dma_chan *atchan)
{
struct at_desc *bad_desc;
struct at_desc *desc;
struct at_desc *child;
unsigned long flags;

@@ -551,13 +523,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);

/* As we are stopped, take advantage to push queued descriptors
* in active_list */
list_splice_init(&atchan->queue, atchan->active_list.prev);

/* Try to restart the controller */
if (!list_empty(&atchan->active_list))
atc_dostart(atchan, atc_first_active(atchan));
if (!list_empty(&atchan->active_list)) {
desc = atc_first_queued(atchan);
list_move_tail(&desc->desc_node, &atchan->active_list);
atc_dostart(atchan, desc);
}

/*
* KERN_CRITICAL may seem harsh, but since this only happens
@@ -672,19 +643,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);

if (list_empty(&atchan->active_list)) {
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
atc_dostart(atchan, desc);
list_add_tail(&desc->desc_node, &atchan->active_list);
} else {
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
list_add_tail(&desc->desc_node, &atchan->queue);
}

list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);

dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
return cookie;
}

@@ -1418,11 +1381,8 @@ static int atc_terminate_all(struct dma_chan *chan)
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
struct at_desc *desc, *_desc;
unsigned long flags;

LIST_HEAD(list);

dev_vdbg(chan2dev(chan), "%s\n", __func__);

/*
@@ -1441,19 +1401,15 @@
cpu_relax();

/* active_list entries will end up before queued entries */
list_splice_init(&atchan->queue, &list);
list_splice_init(&atchan->active_list, &list);

spin_unlock_irqrestore(&atchan->lock, flags);

/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
list_splice_tail_init(&atchan->queue, &atchan->free_list);
list_splice_tail_init(&atchan->active_list, &atchan->free_list);

clear_bit(ATC_IS_PAUSED, &atchan->status);
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);

spin_unlock_irqrestore(&atchan->lock, flags);

return 0;
}

@@ -1508,20 +1464,26 @@ atc_tx_status(struct dma_chan *chan,
}

/**
* atc_issue_pending - try to finish work
* atc_issue_pending - takes the first transaction descriptor in the pending
* queue and starts the transfer.
* @chan: target DMA channel
*/
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_desc *desc;
unsigned long flags;

dev_vdbg(chan2dev(chan), "issue_pending\n");

/* Not needed for cyclic transfers */
if (atc_chan_is_cyclic(atchan))
return;
spin_lock_irqsave(&atchan->lock, flags);
if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
return spin_unlock_irqrestore(&atchan->lock, flags);

atc_advance_work(atchan);
desc = atc_first_queued(atchan);
list_move_tail(&desc->desc_node, &atchan->active_list);
atc_dostart(atchan, desc);
spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
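The tx_submit and issue_pending hunks above implement the dmaengine convention that submit only queues a descriptor while issue_pending is what actually starts the hardware. A stripped-down, single-threaded sketch of that two-step pattern; plain C, no locking, purely illustrative and not driver code.

#include <stdio.h>

#define QUEUE_MAX 8

struct chan {
	int queue[QUEUE_MAX];	/* descriptors waiting to be issued */
	int queued;
	int active;		/* descriptor currently "on the hardware" */
	int busy;
};

/* tx_submit(): only append to the pending queue, never touch the hardware. */
static int submit(struct chan *c, int cookie)
{
	if (c->queued < QUEUE_MAX)
		c->queue[c->queued++] = cookie;
	return cookie;
}

/* issue_pending(): if the channel is idle, move the first queued descriptor
 * to the active slot and start it -- the same split the driver now uses. */
static void issue_pending(struct chan *c)
{
	if (c->busy || c->queued == 0)
		return;
	c->active = c->queue[0];
	for (int i = 1; i < c->queued; i++)
		c->queue[i - 1] = c->queue[i];
	c->queued--;
	c->busy = 1;
	printf("started descriptor %d\n", c->active);
}

int main(void)
{
	struct chan c = { .queued = 0, .busy = 0 };

	submit(&c, 1);		/* nothing starts yet */
	submit(&c, 2);
	issue_pending(&c);	/* descriptor 1 starts now */
	return 0;
}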
@@ -1939,7 +1901,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);

dma_async_device_register(&atdma->dma_common);
err = dma_async_device_register(&atdma->dma_common);
if (err) {
dev_err(&pdev->dev, "Unable to register: %d.\n", err);
goto err_dma_async_device_register;
}

/*
* Do not return an error if the dmac node is not present in order to
@@ -1959,6 +1925,7 @@ static int __init at_dma_probe(struct platform_device *pdev)

err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);