linux-rockchip (mirror of https://github.com/armbian/linux-rockchip.git)
Merge 5.10.129 into android12-5.10-lts
Changes in 5.10.129

drm/amdgpu: To flush tlb for MMHUB of RAVEN series
ipv6: take care of disable_policy when restoring routes
nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG SX6000LNP (AKA SPECTRIX S40G)
nvdimm: Fix badblocks clear off-by-one error
powerpc/prom_init: Fix kernel config grep
powerpc/book3e: Fix PUD allocation size in map_kernel_page()
powerpc/bpf: Fix use of user_pt_regs in uapi
dm raid: fix accesses beyond end of raid member array
dm raid: fix KASAN warning in raid5_add_disks
s390/archrandom: simplify back to earlier design and initialize earlier
SUNRPC: Fix READ_PLUS crasher
net: rose: fix UAF bugs caused by timer handler
net: usb: ax88179_178a: Fix packet receiving
virtio-net: fix race between ndo_open() and virtio_device_ready()
selftests/net: pass ipv6_args to udpgso_bench's IPv6 TCP test
net: dsa: bcm_sf2: force pause link settings
net: tun: unlink NAPI from device on destruction
net: tun: stop NAPI when detaching queues
net: dp83822: disable false carrier interrupt
net: dp83822: disable rx error interrupt
RDMA/qedr: Fix reporting QP timeout attribute
RDMA/cm: Fix memory leak in ib_cm_insert_listen
linux/dim: Fix divide by 0 in RDMA DIM
usbnet: fix memory allocation in helpers
net: ipv6: unexport __init-annotated seg6_hmac_net_init()
NFSD: restore EINVAL error translation in nfsd_commit()
caif_virtio: fix race between virtio_device_ready() and ndo_open()
PM / devfreq: exynos-ppmu: Fix refcount leak in of_get_devfreq_events
s390: remove unneeded 'select BUILD_BIN2C'
netfilter: nft_dynset: restore set element counter when failing to update
net/sched: act_api: Notify user space if any actions were flushed before error
net: bonding: fix possible NULL deref in rlb code
net: bonding: fix use-after-free after 802.3ad slave unbind
nfc: nfcmrvl: Fix irq_of_parse_and_map() return value
NFC: nxp-nci: Don't issue a zero length i2c_master_read()
tipc: move bc link creation back to tipc_node_create
epic100: fix use after free on rmmod
io_uring: ensure that send/sendmsg and recv/recvmsg check sqe->ioprio
tunnels: do not assume mac header is set in skb_tunnel_check_pmtu()
net: tun: avoid disabling NAPI twice
xfs: use current->journal_info for detecting transaction recursion
xfs: rename variable mp to parsing_mp
xfs: Skip repetitive warnings about mount options
xfs: ensure xfs_errortag_random_default matches XFS_ERRTAG_MAX
xfs: fix xfs_trans slab cache name
xfs: update superblock counters correctly for !lazysbcount
xfs: fix xfs_reflink_unshare usage of filemap_write_and_wait_range
tcp: add a missing nf_reset_ct() in 3WHS handling
xen/gntdev: Avoid blocking in unmap_grant_pages()
drivers: cpufreq: Add missing of_node_put() in qoriq-cpufreq.c
sit: use min
ipv6/sit: fix ipip6_tunnel_get_prl return value
hwmon: (ibmaem) don't call platform_device_del() if platform_device_add() fails
selftests/rseq: remove ARRAY_SIZE define from individual tests
selftests/rseq: introduce own copy of rseq uapi header
selftests/rseq: Remove useless assignment to cpu variable
selftests/rseq: Remove volatile from __rseq_abi
selftests/rseq: Introduce rseq_get_abi() helper
selftests/rseq: Introduce thread pointer getters
selftests/rseq: Uplift rseq selftests for compatibility with glibc-2.35
selftests/rseq: Fix ppc32: wrong rseq_cs 32-bit field pointer on big endian
selftests/rseq: Fix ppc32 missing instruction selection "u" and "x" for load/store
selftests/rseq: Fix ppc32 offsets by using long rather than off_t
selftests/rseq: Fix warnings about #if checks of undefined tokens
selftests/rseq: Remove arm/mips asm goto compiler work-around
selftests/rseq: Fix: work-around asm goto compiler bugs
selftests/rseq: x86-64: use %fs segment selector for accessing rseq thread area
selftests/rseq: x86-32: use %gs segment selector for accessing rseq thread area
selftests/rseq: Change type of rseq_offset to ptrdiff_t
xen/blkfront: fix leaking data in shared pages
xen/netfront: fix leaking data in shared pages
xen/netfront: force data bouncing when backend is untrusted
xen/blkfront: force data bouncing when backend is untrusted
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
xen/arm: Fix race in RB-tree based P2M accounting
net: usb: qmi_wwan: add Telit 0x1060 composition
net: usb: qmi_wwan: add Telit 0x1070 composition
clocksource/drivers/ixp4xx: remove EXPORT_SYMBOL_GPL from ixp4xx_timer_setup()
Linux 5.10.129

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7a2bdb1fd13c78604c728f4cbfb6f659d7a348e3
Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 128
+SUBLEVEL = 129
 EXTRAVERSION =
 NAME = Dare mighty things
 
arch/arm/xen/p2m.c

@@ -62,11 +62,12 @@ out:
 
 unsigned long __pfn_to_mfn(unsigned long pfn)
 {
-	struct rb_node *n = phys_to_mach.rb_node;
+	struct rb_node *n;
 	struct xen_p2m_entry *entry;
 	unsigned long irqflags;
 
 	read_lock_irqsave(&p2m_lock, irqflags);
+	n = phys_to_mach.rb_node;
 	while (n) {
 		entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 		if (entry->pfn <= pfn &&
@@ -153,10 +154,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
 	int rc;
 	unsigned long irqflags;
 	struct xen_p2m_entry *p2m_entry;
-	struct rb_node *n = phys_to_mach.rb_node;
+	struct rb_node *n;
 
 	if (mfn == INVALID_P2M_ENTRY) {
 		write_lock_irqsave(&p2m_lock, irqflags);
+		n = phys_to_mach.rb_node;
 		while (n) {
 			p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 			if (p2m_entry->pfn <= pfn &&
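Why this counts as a race fix: both walkers previously captured phys_to_mach.rb_node before taking p2m_lock, so a concurrent writer rebalancing the rb-tree could hand them a stale root. The rule the patch restores is to snapshot a shared structure's head only while holding the lock. Below is a minimal self-contained sketch of that rule, using a pthreads rwlock in place of the kernel lock; all demo_* names are illustrative, not kernel APIs.

#include <pthread.h>
#include <stddef.h>

struct node { unsigned long key; struct node *left, *right; };

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *demo_root;	/* writers may replace or rebalance this */

struct node *demo_lookup(unsigned long key)
{
	struct node *n, *found = NULL;

	pthread_rwlock_rdlock(&demo_lock);
	n = demo_root;	/* snapshot the root only under the lock, as in the fix */
	while (n) {
		if (key == n->key) {
			found = n;
			break;
		}
		n = (key < n->key) ? n->left : n->right;
	}
	pthread_rwlock_unlock(&demo_lock);
	return found;
}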
arch/powerpc/include/asm/bpf_perf_event.h (new file, 9 lines)

@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
+#define _ASM_POWERPC_BPF_PERF_EVENT_H
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */

arch/powerpc/include/uapi/asm/bpf_perf_event.h (deleted, 9 lines)

@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
-#define _UAPI__ASM_BPF_PERF_EVENT_H__
-
-#include <asm/ptrace.h>
-
-typedef struct user_pt_regs bpf_user_pt_regs_t;
-
-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
arch/powerpc/kernel/prom_init_check.sh

@@ -13,7 +13,7 @@
 # If you really need to reference something from prom_init.o add
 # it to the list below:
 
-grep "^CONFIG_KASAN=y$" .config >/dev/null
+grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
 if [ $? -eq 0 ]
 then
 	MEM_FUNCS="__memcpy __memset"
arch/powerpc/mm/nohash/book3e_pgtable.c

@@ -95,8 +95,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 		pgdp = pgd_offset_k(ea);
 		p4dp = p4d_offset(pgdp, ea);
 		if (p4d_none(*p4dp)) {
-			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
-			p4d_populate(&init_mm, p4dp, pmdp);
+			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
+			p4d_populate(&init_mm, p4dp, pudp);
 		}
 		pudp = pud_offset(p4dp, ea);
 		if (pud_none(*pudp)) {
@@ -105,7 +105,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 		}
 		pmdp = pmd_offset(pudp, ea);
 		if (!pmd_present(*pmdp)) {
-			ptep = early_alloc_pgtable(PAGE_SIZE);
+			ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
 			pmd_populate_kernel(&init_mm, pmdp, ptep);
 		}
 		ptep = pte_offset_kernel(pmdp, ea);
arch/s390/Kconfig

@@ -507,7 +507,6 @@ config KEXEC
 config KEXEC_FILE
 	bool "kexec file based system call"
 	select KEXEC_CORE
-	select BUILD_BIN2C
 	depends on CRYPTO
 	depends on CRYPTO_SHA256
 	depends on CRYPTO_SHA256_S390
arch/s390/crypto/arch_random.c

@@ -2,126 +2,17 @@
 /*
  * s390 arch random implementation.
  *
- * Copyright IBM Corp. 2017, 2018
+ * Copyright IBM Corp. 2017, 2020
  * Author(s): Harald Freudenberger
- *
- * The s390_arch_random_generate() function may be called from random.c
- * in interrupt context. So this implementation does the best to be very
- * fast. There is a buffer of random data which is asynchronously checked
- * and filled by a workqueue thread.
- * If there are enough bytes in the buffer the s390_arch_random_generate()
- * just delivers these bytes. Otherwise false is returned until the
- * worker thread refills the buffer.
- * The worker fills the rng buffer by pulling fresh entropy from the
- * high quality (but slow) true hardware random generator. This entropy
- * is then spread over the buffer with an pseudo random generator PRNG.
- * As the arch_get_random_seed_long() fetches 8 bytes and the calling
- * function add_interrupt_randomness() counts this as 1 bit entropy the
- * distribution needs to make sure there is in fact 1 bit entropy contained
- * in 8 bytes of the buffer. The current values pull 32 byte entropy
- * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
- * will contain 1 bit of entropy.
- * The worker thread is rescheduled based on the charge level of the
- * buffer but at least with 500 ms delay to avoid too much CPU consumption.
- * So the max. amount of rng data delivered via arch_get_random_seed is
- * limited to 4k bytes per second.
  */
 
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/random.h>
-#include <linux/slab.h>
 #include <linux/static_key.h>
-#include <linux/workqueue.h>
 #include <asm/cpacf.h>
 
 DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
 
 atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
 EXPORT_SYMBOL(s390_arch_random_counter);
-
-#define ARCH_REFILL_TICKS (HZ/2)
-#define ARCH_PRNG_SEED_SIZE 32
-#define ARCH_RNG_BUF_SIZE 2048
-
-static DEFINE_SPINLOCK(arch_rng_lock);
-static u8 *arch_rng_buf;
-static unsigned int arch_rng_buf_idx;
-
-static void arch_rng_refill_buffer(struct work_struct *);
-static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
-
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
-{
-	/* max hunk is ARCH_RNG_BUF_SIZE */
-	if (nbytes > ARCH_RNG_BUF_SIZE)
-		return false;
-
-	/* lock rng buffer */
-	if (!spin_trylock(&arch_rng_lock))
-		return false;
-
-	/* try to resolve the requested amount of bytes from the buffer */
-	arch_rng_buf_idx -= nbytes;
-	if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
-		memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
-		atomic64_add(nbytes, &s390_arch_random_counter);
-		spin_unlock(&arch_rng_lock);
-		return true;
-	}
-
-	/* not enough bytes in rng buffer, refill is done asynchronously */
-	spin_unlock(&arch_rng_lock);
-
-	return false;
-}
-EXPORT_SYMBOL(s390_arch_random_generate);
-
-static void arch_rng_refill_buffer(struct work_struct *unused)
-{
-	unsigned int delay = ARCH_REFILL_TICKS;
-
-	spin_lock(&arch_rng_lock);
-	if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
-		/* buffer is exhausted and needs refill */
-		u8 seed[ARCH_PRNG_SEED_SIZE];
-		u8 prng_wa[240];
-		/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
-		cpacf_trng(NULL, 0, seed, sizeof(seed));
-		/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
-		memset(prng_wa, 0, sizeof(prng_wa));
-		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
-			   &prng_wa, NULL, 0, seed, sizeof(seed));
-		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
-			   &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
-		arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
-	}
-	delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
-	spin_unlock(&arch_rng_lock);
-
-	/* kick next check */
-	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
-}
-
-static int __init s390_arch_random_init(void)
-{
-	/* all the needed PRNO subfunctions available ? */
-	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
-	    cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
-
-		/* alloc arch random working buffer */
-		arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
-		if (!arch_rng_buf)
-			return -ENOMEM;
-
-		/* kick worker queue job to fill the random buffer */
-		queue_delayed_work(system_long_wq,
-				   &arch_rng_work, ARCH_REFILL_TICKS);
-
-		/* enable arch random to the outside world */
-		static_branch_enable(&s390_arch_random_available);
-	}
-
-	return 0;
-}
-arch_initcall(s390_arch_random_init);
-
arch/s390/include/asm/archrandom.h

@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017, 2020
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -15,12 +15,11 @@
 
 #include <linux/static_key.h>
 #include <linux/atomic.h>
+#include <asm/cpacf.h>
 
 DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
 extern atomic64_t s390_arch_random_counter;
 
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
-
 static inline bool __must_check arch_get_random_long(unsigned long *v)
 {
 	return false;
@@ -34,7 +33,9 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
-		return s390_arch_random_generate((u8 *)v, sizeof(*v));
+		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+		atomic64_add(sizeof(*v), &s390_arch_random_counter);
+		return true;
 	}
 	return false;
 }
@@ -42,7 +43,9 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
-		return s390_arch_random_generate((u8 *)v, sizeof(*v));
+		cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
+		atomic64_add(sizeof(*v), &s390_arch_random_counter);
+		return true;
 	}
 	return false;
 }
arch/s390/kernel/setup.c

@@ -1009,6 +1009,11 @@ static void __init setup_randomness(void)
 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
 	memblock_free((unsigned long) vmms, PAGE_SIZE);
+
+#ifdef CONFIG_ARCH_RANDOM
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+		static_branch_enable(&s390_arch_random_available);
+#endif
 }
 
 /*
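The "initialize earlier" half of the fix is this setup.c hunk: the static key is now flipped during early setup, before the random core starts calling arch_get_random_seed_long(), instead of waiting for an arch_initcall. A kernel-style sketch of the static-key idiom used here follows; the probe function is a stand-in, not a real API.

#include <linux/init.h>
#include <linux/static_key.h>

DEFINE_STATIC_KEY_FALSE(demo_feature_available);

/* Stand-in for a real facility probe such as cpacf_query_func(). */
static bool __init demo_probe_hardware(void)
{
	return true;
}

/* Early boot: flip the key once, the way setup_randomness() now does. */
void __init demo_detect(void)
{
	if (demo_probe_hardware())
		static_branch_enable(&demo_feature_available);
}

/* Hot path: this compiles to a patched jump, so the disabled case
 * costs no memory load or compare. */
bool demo_fast_path_enabled(void)
{
	return static_branch_likely(&demo_feature_available);
}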
drivers/block/xen-blkfront.c

@@ -151,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE(info)	\
 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
@@ -208,6 +212,7 @@ struct blkfront_info
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
 	unsigned int feature_persistent:1;
+	unsigned int bounce:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
 	/* Number of 4KB segments handled */
@@ -310,8 +315,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		if (info->feature_persistent) {
-			granted_page = alloc_page(GFP_NOIO);
+		if (info->bounce) {
+			granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
 			if (!granted_page) {
 				kfree(gnt_list_entry);
 				goto out_of_memory;
@@ -330,7 +335,7 @@ out_of_memory:
 	list_for_each_entry_safe(gnt_list_entry, n,
 				 &rinfo->grants, node) {
 		list_del(&gnt_list_entry->node);
-		if (info->feature_persistent)
+		if (info->bounce)
 			__free_page(gnt_list_entry->page);
 		kfree(gnt_list_entry);
 		i--;
@@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (info->feature_persistent)
+	if (info->bounce)
 		grant_foreign_access(gnt_list_entry, info);
 	else {
 		/* Grant access to the GFN passed by the caller */
@@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (!info->feature_persistent) {
+	if (!info->bounce) {
 		struct page *indirect_page;
 
 		/* Fetch a pre-allocated page to use for indirect grefs */
@@ -715,7 +720,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 		.grant_idx = 0,
 		.segments = NULL,
 		.rinfo = rinfo,
-		.need_copy = rq_data_dir(req) && info->feature_persistent,
+		.need_copy = rq_data_dir(req) && info->bounce,
 	};
 
 	/*
@@ -1035,11 +1040,12 @@ static void xlvbd_flush(struct blkfront_info *info)
 {
 	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
 			      info->feature_fua ? true : false);
-	pr_info("blkfront: %s: %s %s %s %s %s\n",
+	pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
 		info->gd->disk_name, flush_info(info),
 		"persistent grants:", info->feature_persistent ?
 		"enabled;" : "disabled;", "indirect descriptors:",
-		info->max_indirect_segments ? "enabled;" : "disabled;");
+		info->max_indirect_segments ? "enabled;" : "disabled;",
+		"bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1273,7 +1279,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 	if (!list_empty(&rinfo->indirect_pages)) {
 		struct page *indirect_page, *n;
 
-		BUG_ON(info->feature_persistent);
+		BUG_ON(info->bounce);
 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
 			list_del(&indirect_page->lru);
 			__free_page(indirect_page);
@@ -1290,7 +1296,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 					  0, 0UL);
 			rinfo->persistent_gnts_c--;
 		}
-		if (info->feature_persistent)
+		if (info->bounce)
 			__free_page(persistent_gnt->page);
 		kfree(persistent_gnt);
 	}
@@ -1311,7 +1317,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = rinfo->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(persistent_gnt->page);
 			kfree(persistent_gnt);
 		}
@@ -1501,7 +1507,7 @@ static int blkif_completion(unsigned long *id,
 	data.s = s;
 	num_sg = s->num_sg;
 
-	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+	if (bret->operation == BLKIF_OP_READ && info->bounce) {
 		for_each_sg(s->sg, sg, num_sg, i) {
 			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
@@ -1560,7 +1566,7 @@ static int blkif_completion(unsigned long *id,
 			 * Add the used indirect page back to the list of
 			 * available pages for indirect grefs.
 			 */
-			if (!info->feature_persistent) {
+			if (!info->bounce) {
 				indirect_page = s->indirect_grants[i]->page;
 				list_add(&indirect_page->lru, &rinfo->indirect_pages);
 			}
@@ -1753,7 +1759,7 @@ static int setup_blkring(struct xenbus_device *dev,
 	for (i = 0; i < info->nr_ring_pages; i++)
 		rinfo->ring_ref[i] = GRANT_INVALID_REF;
 
-	sring = alloc_pages_exact(ring_size, GFP_NOIO);
+	sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
 	if (!sring) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 		return -ENOMEM;
@@ -1857,6 +1863,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	if (!info)
 		return -ENODEV;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xen_blkif_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 					      "max-ring-page-order", 0);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2283,17 +2293,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	if (err)
 		goto out_of_memory;
 
-	if (!info->feature_persistent && info->max_indirect_segments) {
+	if (!info->bounce && info->max_indirect_segments) {
 		/*
-		 * We are using indirect descriptors but not persistent
-		 * grants, we need to allocate a set of pages that can be
+		 * We are using indirect descriptors but don't have a bounce
+		 * buffer, we need to allocate a set of pages that can be
 		 * used for mapping indirect grefs
 		 */
 		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
 
 		BUG_ON(!list_empty(&rinfo->indirect_pages));
 		for (i = 0; i < num; i++) {
-			struct page *indirect_page = alloc_page(GFP_KERNEL);
+			struct page *indirect_page = alloc_page(GFP_KERNEL |
+								__GFP_ZERO);
 			if (!indirect_page)
 				goto out_of_memory;
 			list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2386,6 +2397,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	info->feature_persistent =
 		!!xenbus_read_unsigned(info->xbdev->otherend,
 				       "feature-persistent", 0);
+	if (info->feature_persistent)
+		info->bounce = true;
 
 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 						 "feature-max-indirect-segments", 0);
@@ -2759,6 +2772,13 @@ static void blkfront_delay_work(struct work_struct *work)
 	struct blkfront_info *info;
 	bool need_schedule_work = false;
 
+	/*
+	 * Note that when using bounce buffers but not persistent grants
+	 * there's no need to run blkfront_delay_work because grants are
+	 * revoked in blkif_completion or else an error is reported and the
+	 * connection is closed.
+	 */
+
 	mutex_lock(&blkfront_mutex);
 
 	list_for_each_entry(info, &info_list, info_list) {
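The common thread in these blkfront hunks (and the matching xen/netfront patches in this release): everything that used to be keyed on feature_persistent is now keyed on a new info->bounce flag, set whenever the backend is untrusted or persistent grants are in use, and every page that may be granted to the backend is allocated with __GFP_ZERO so stale kernel data can never leak through a shared page. A kernel-style sketch of the two ideas, with demo_* names that are illustrative rather than driver API:

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Decide once per connection, as talk_to_blkback() does: bounce when the
 * module parameter or the per-device xenstore "trusted" node says so. */
static bool demo_should_bounce(bool module_trusted, bool xenstore_trusted)
{
	return !module_trusted || !xenstore_trusted;
}

/* Share only freshly zeroed pages with the backend, copying I/O data
 * through them, so unrelated kernel memory cannot leak via a grant. */
static struct page *demo_alloc_shared_page(const void *src, size_t len)
{
	struct page *page = alloc_page(GFP_NOIO | __GFP_ZERO);

	if (!page)
		return NULL;
	if (src)
		memcpy(page_address(page), src, len); /* write-path copy-in */
	return page;
}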
drivers/clocksource/timer-ixp4xx.c

@@ -258,7 +258,6 @@ void __init ixp4xx_timer_setup(resource_size_t timerbase,
 	}
 	ixp4xx_timer_register(base, timer_irq, timer_freq);
 }
-EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
 
 #ifdef CONFIG_OF
 static __init int ixp4xx_of_timer_init(struct device_node *np)
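Why dropping the export is a correctness fix rather than cleanup: ixp4xx_timer_setup() is an __init function, so its text is freed once boot finishes, and an exported symbol would let a module call into that freed memory. A short illustrative sketch of the hazard (demo name, not the driver's):

#include <linux/init.h>

int __init demo_early_setup(void)
{
	/* Placed in .init.text; this memory is reclaimed once boot ends. */
	return 0;
}

/*
 * EXPORT_SYMBOL_GPL(demo_early_setup) would hand modules a symbol that,
 * if resolved after boot, points into freed .init memory; modpost warns
 * about exactly this section mismatch, hence the removal above.
 */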
drivers/cpufreq/qoriq-cpufreq.c

@@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
 
 	np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
 	if (np) {
+		of_node_put(np);
 		dev_info(&pdev->dev, "Disabling due to erratum A-008083");
 		return -ENODEV;
 	}
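The rule behind this one-liner, which the exynos-ppmu hunk below applies to its error paths as well: of_find_matching_node() (like the other of_find_* helpers) returns a device node with its refcount raised, and every exit path that received a node must drop it with of_node_put(). A minimal sketch of the balanced pattern; the match table is hypothetical:

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical match table, standing in for qoriq_cpufreq_blacklist. */
static const struct of_device_id demo_blacklist[] = {
	{ .compatible = "vendor,errata-affected-soc" },
	{ /* sentinel */ }
};

static int demo_check_blacklist(void)
{
	struct device_node *np = of_find_matching_node(NULL, demo_blacklist);

	if (np) {
		of_node_put(np);	/* balance the refcount before bailing */
		return -ENODEV;
	}
	return 0;
}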
drivers/devfreq/event/exynos-ppmu.c

@@ -514,15 +514,19 @@ static int of_get_devfreq_events(struct device_node *np,
 
 	count = of_get_child_count(events_np);
 	desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
-	if (!desc)
+	if (!desc) {
+		of_node_put(events_np);
 		return -ENOMEM;
+	}
 	info->num_events = count;
 
 	of_id = of_match_device(exynos_ppmu_id_match, dev);
 	if (of_id)
 		info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
-	else
+	else {
+		of_node_put(events_np);
 		return -EINVAL;
+	}
 
 	j = 0;
 	for_each_child_of_node(events_np, node) {
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c

@@ -689,7 +689,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
 	const uint32_t flush_type = 0;
 	bool all_hub = false;
 
-	if (adev->family == AMDGPU_FAMILY_AI)
+	if (adev->family == AMDGPU_FAMILY_AI ||
+	    adev->family == AMDGPU_FAMILY_RV)
 		all_hub = true;
 
 	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
drivers/hwmon/ibmaem.c

@@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
 
 	res = platform_device_add(data->pdev);
 	if (res)
-		goto ipmi_err;
+		goto dev_add_err;
 
 	platform_set_drvdata(data->pdev, data);
 
@@ -598,7 +598,9 @@ hwmon_reg_err:
 	ipmi_destroy_user(data->ipmi.user);
 ipmi_err:
 	platform_set_drvdata(data->pdev, NULL);
-	platform_device_unregister(data->pdev);
+	platform_device_del(data->pdev);
+dev_add_err:
+	platform_device_put(data->pdev);
 dev_err:
 	ida_simple_remove(&aem_ida, data->id);
 id_err:
@@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
 
 	res = platform_device_add(data->pdev);
 	if (res)
-		goto ipmi_err;
+		goto dev_add_err;
 
 	platform_set_drvdata(data->pdev, data);
 
@@ -738,7 +740,9 @@ hwmon_reg_err:
 	ipmi_destroy_user(data->ipmi.user);
 ipmi_err:
 	platform_set_drvdata(data->pdev, NULL);
-	platform_device_unregister(data->pdev);
+	platform_device_del(data->pdev);
+dev_add_err:
+	platform_device_put(data->pdev);
 dev_err:
 	ida_simple_remove(&aem_ida, data->id);
 id_err:
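The ibmaem fix follows the documented ownership rule for platform devices: after platform_device_alloc(), a failed platform_device_add() must be undone with platform_device_put(), while platform_device_del() (or unregister) is only valid once the add has succeeded. A minimal sketch of the correct ladder; the device name is illustrative:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int demo_register(struct platform_device **out)
{
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc("demo-dev", 0);	/* name illustrative */
	if (!pdev)
		return -ENOMEM;

	err = platform_device_add(pdev);
	if (err) {
		platform_device_put(pdev);	/* add failed: put, never del */
		return err;
	}

	*out = pdev;	/* teardown after success: platform_device_unregister() */
	return 0;
}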
drivers/infiniband/core/cm.c

@@ -1280,8 +1280,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
 		return ERR_CAST(cm_id_priv);
 
 	err = cm_init_listen(cm_id_priv, service_id, 0);
-	if (err)
+	if (err) {
+		ib_destroy_cm_id(&cm_id_priv->id);
 		return ERR_PTR(err);
+	}
 
 	spin_lock_irq(&cm_id_priv->lock);
 	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
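The leak fix restores create/teardown symmetry: once the cm_id has been allocated, a failure in the follow-up initialization must release the id rather than just return. A generic sketch of that constructor-failure pattern in kernel C (demo types, not the RDMA API):

#include <linux/slab.h>

struct demo_obj {
	int configured;
};

static int demo_configure(struct demo_obj *obj)
{
	obj->configured = 1;
	return 0;	/* can fail in the real code */
}

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (demo_configure(obj)) {
		kfree(obj);	/* undo the allocation, as the cm.c fix does */
		return NULL;
	}
	return obj;
}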
drivers/infiniband/hw/qedr/qedr.h

@@ -418,6 +418,7 @@ struct qedr_qp {
 	u32 sq_psn;
 	u32 qkey;
 	u32 dest_qp_num;
+	u8 timeout;
 
 	/* Relevant to qps created from kernel space only (ULPs) */
 	u8 prev_wqe_size;
drivers/infiniband/hw/qedr/verbs.c

@@ -2622,6 +2622,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				1 << max_t(int, attr->timeout - 8, 0);
 		else
 			qp_params.ack_timeout = 0;
+
+		qp->timeout = attr->timeout;
 	}
 
 	if (attr_mask & IB_QP_RETRY_CNT) {
@@ -2781,7 +2783,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
 	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
 	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
-	qp_attr->timeout = params.timeout;
+	qp_attr->timeout = qp->timeout;
 	qp_attr->rnr_retry = params.rnr_retry;
 	qp_attr->retry_cnt = params.retry_cnt;
 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
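The qedr pattern: the firmware interface has no field that reliably echoes the ack timeout back, so the driver now caches the value in struct qedr_qp at modify time and answers queries from that cache instead of params.timeout. A generic sketch of the set-time-cache, query-from-cache idea (demo types, not the qedr API):

struct demo_qp {
	unsigned char timeout;	/* cached: hw cannot report it back */
};

static void demo_modify_qp(struct demo_qp *qp, unsigned char timeout)
{
	/* program the hardware here, then remember what was asked for */
	qp->timeout = timeout;
}

static unsigned char demo_query_qp(const struct demo_qp *qp)
{
	return qp->timeout;	/* serve the cached value, not a hw field */
}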
drivers/md/dm-raid.c

@@ -1002,12 +1002,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 static int validate_raid_redundancy(struct raid_set *rs)
 {
 	unsigned int i, rebuild_cnt = 0;
-	unsigned int rebuilds_per_group = 0, copies;
+	unsigned int rebuilds_per_group = 0, copies, raid_disks;
 	unsigned int group_size, last_group_start;
 
-	for (i = 0; i < rs->md.raid_disks; i++)
-		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
-		    !rs->dev[i].rdev.sb_page)
+	for (i = 0; i < rs->raid_disks; i++)
+		if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+		    ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+		      !rs->dev[i].rdev.sb_page)))
 			rebuild_cnt++;
 
 	switch (rs->md.level) {
@@ -1047,8 +1048,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
 	 *	    A	 A    B	   B	C
 	 *	    C	 D    D	   E	E
 	 */
+	raid_disks = min(rs->raid_disks, rs->md.raid_disks);
 	if (__is_raid10_near(rs->md.new_layout)) {
-		for (i = 0; i < rs->md.raid_disks; i++) {
+		for (i = 0; i < raid_disks; i++) {
 			if (!(i % copies))
 				rebuilds_per_group = 0;
 			if ((!rs->dev[i].rdev.sb_page ||
@@ -1071,10 +1073,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
 	 * results in the need to treat the last (potentially larger)
 	 * set differently.
 	 */
-	group_size = (rs->md.raid_disks / copies);
-	last_group_start = (rs->md.raid_disks / group_size) - 1;
+	group_size = (raid_disks / copies);
+	last_group_start = (raid_disks / group_size) - 1;
 	last_group_start *= group_size;
-	for (i = 0; i < rs->md.raid_disks; i++) {
+	for (i = 0; i < raid_disks; i++) {
 		if (!(i % copies) && !(i > last_group_start))
 			rebuilds_per_group = 0;
 		if ((!rs->dev[i].rdev.sb_page ||
@@ -1589,7 +1591,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
 {
 	int i;
 
-	for (i = 0; i < rs->md.raid_disks; i++) {
+	for (i = 0; i < rs->raid_disks; i++) {
 		struct md_rdev *rdev = &rs->dev[i].rdev;
 
 		if (!test_bit(Journal, &rdev->flags) &&
@@ -3732,13 +3734,13 @@ static int raid_iterate_devices(struct dm_target *ti,
 	unsigned int i;
 	int r = 0;
 
-	for (i = 0; !r && i < rs->md.raid_disks; i++)
-		if (rs->dev[i].data_dev)
-			r = fn(ti,
-			       rs->dev[i].data_dev,
-			       0, /* No offset on data devs */
-			       rs->md.dev_sectors,
-			       data);
+	for (i = 0; !r && i < rs->raid_disks; i++) {
+		if (rs->dev[i].data_dev) {
+			r = fn(ti, rs->dev[i].data_dev,
+			       0, /* No offset on data devs */
+			       rs->md.dev_sectors, data);
+		}
+	}
 
 	return r;
 }
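All of these dm-raid hunks fix the same out-of-bounds pattern: during a reshape that removes legs, rs->md.raid_disks can exceed rs->raid_disks, but the rs->dev[] array is only rs->raid_disks long, so every loop (and the raid10 group arithmetic) must bound itself by the smaller count. A self-contained sketch of the guarded iteration, with demo structures standing in for the dm-raid types:

#include <stddef.h>

#define DEMO_MIN(a, b) ((a) < (b) ? (a) : (b))

struct demo_set {
	unsigned int array_disks;	/* size of dev[]: rs->raid_disks */
	unsigned int md_disks;		/* md core's count: rs->md.raid_disks */
	int dev[16];			/* stand-in for rs->dev[] */
};

int demo_scan(const struct demo_set *rs)
{
	unsigned int i, n = DEMO_MIN(rs->array_disks, rs->md_disks);
	int hits = 0;

	for (i = 0; i < n; i++)		/* never indexes past the allocation */
		hits += rs->dev[i];
	return hits;
}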
Some files were not shown because too many files have changed in this diff.