Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - the rest of MM
 - procfs updates
 - various misc things
 - more y2038 fixes
 - get_maintainer updates
 - lib/ updates
 - checkpatch updates
 - various epoll updates
 - autofs updates
 - hfsplus
 - some reiserfs work
 - fatfs updates
 - signal.c cleanups
 - ipc/ updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (166 commits)
  ipc/util.c: update return value of ipc_getref from int to bool
  ipc/util.c: further variable name cleanups
  ipc: simplify ipc initialization
  ipc: get rid of ids->tables_initialized hack
  lib/rhashtable: guarantee initial hashtable allocation
  lib/rhashtable: simplify bucket_table_alloc()
  ipc: drop ipc_lock()
  ipc/util.c: correct comment in ipc_obtain_object_check
  ipc: rename ipcctl_pre_down_nolock()
  ipc/util.c: use ipc_rcu_putref() for failues in ipc_addid()
  ipc: reorganize initialization of kern_ipc_perm.seq
  ipc: compute kern_ipc_perm.id under the ipc lock
  init/Kconfig: remove EXPERT from CHECKPOINT_RESTORE
  fs/sysv/inode.c: use ktime_get_real_seconds() for superblock stamp
  adfs: use timespec64 for time conversion
  kernel/sysctl.c: fix typos in comments
  drivers/rapidio/devices/rio_mport_cdev.c: remove redundant pointer md
  fork: don't copy inconsistent signal handler state to child
  signal: make get_signal() return bool
  signal: make sigkill_pending() return bool
  ...
@@ -1072,6 +1072,24 @@ PAGE_SIZE multiple when read back.
 	high limit is used and monitored properly, this limit's
 	utility is limited to providing the final safety net.
 
+  memory.oom.group
+	A read-write single value file which exists on non-root
+	cgroups. The default value is "0".
+
+	Determines whether the cgroup should be treated as
+	an indivisible workload by the OOM killer. If set,
+	all tasks belonging to the cgroup or to its descendants
+	(if the memory cgroup is not a leaf cgroup) are killed
+	together or not at all. This can be used to avoid
+	partial kills to guarantee workload integrity.
+
+	Tasks with the OOM protection (oom_score_adj set to -1000)
+	are treated as an exception and are never killed.
+
+	If the OOM killer is invoked in a cgroup, it's not going
+	to kill any tasks outside of this cgroup, regardless of
+	memory.oom.group values of ancestor cgroups.
+
   memory.events
 	A read-only flat-keyed file which exists on non-root cgroups.
 	The following entries are defined. Unless specified
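The memory.oom.group knob documented above is a plain single-value cgroup file, so opting a workload in is a one-line write. A minimal user-space sketch; the mount point /sys/fs/cgroup and the cgroup name "batch" are assumptions for illustration, not part of this diff:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical cgroup path; the cgroup must already exist */
		int fd = open("/sys/fs/cgroup/batch/memory.oom.group", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* "1": the OOM killer takes the whole cgroup or nothing */
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}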
@@ -3041,8 +3041,9 @@
 			on: enable the feature
 
 	page_poison=	[KNL] Boot-time parameter changing the state of
-			poisoning on the buddy allocator.
-			off: turn off poisoning
+			poisoning on the buddy allocator, available with
+			CONFIG_PAGE_POISONING=y.
+			off: turn off poisoning (default)
+			on: turn on poisoning
 
 	panic=		[KNL] Kernel behaviour on panic: delay <timeout>
@@ -870,6 +870,7 @@ Committed_AS: 100056 kB
 VmallocTotal:   112216 kB
 VmallocUsed:       428 kB
 VmallocChunk:   111088 kB
+Percpu:          62080 kB
 HardwareCorrupted:   0 kB
 AnonHugePages:   49152 kB
 ShmemHugePages:      0 kB
@@ -962,6 +963,8 @@ Committed_AS: The amount of memory presently allocated on the system.
 VmallocTotal: total size of vmalloc memory area
  VmallocUsed: amount of vmalloc area which is used
 VmallocChunk: largest contiguous block of vmalloc area which is free
+      Percpu: Memory allocated to the percpu allocator used to back percpu
+              allocations. This stat excludes the cost of metadata.
 
..............................................................................
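Since this adds a new row to /proc/meminfo, consumers that scrape the file pick it up like any other key. A short sketch of reading the new Percpu field (the field name comes from the diff; the rest is generic /proc parsing):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		unsigned long kb;
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			/* matches a line of the form "Percpu:  62080 kB" */
			if (sscanf(line, "Percpu: %lu kB", &kb) == 1) {
				printf("percpu allocator: %lu kB\n", kb);
				break;
			}
		}
		fclose(f);
		return 0;
	}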
@@ -38,6 +38,7 @@ show up in /proc/sys/kernel:
 - hung_task_panic
 - hung_task_check_count
 - hung_task_timeout_secs
+- hung_task_check_interval_secs
 - hung_task_warnings
 - hyperv_record_panic_msg
 - kexec_load_disabled
@@ -355,7 +356,7 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 
 hung_task_timeout_secs:
 
-Check interval. When a task in D state did not get scheduled
+When a task in D state did not get scheduled
 for more than this value report a warning.
 This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 
@@ -364,6 +365,18 @@ Possible values to set are in range {0..LONG_MAX/HZ}.
 
 ==============================================================
 
+hung_task_check_interval_secs:
+
+Hung task check interval. If hung task checking is enabled
+(see hung_task_timeout_secs), the check is done every
+hung_task_check_interval_secs seconds.
+This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
+
+0 (default): means use hung_task_timeout_secs as checking interval.
+Possible values to set are in range {0..LONG_MAX/HZ}.
+
+==============================================================
+
 hung_task_warnings:
 
 The maximum number of warnings to report. During a check interval
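These are ordinary sysctls, so they can be driven from procfs as well as sysctl(8). A sketch of decoupling the check interval from the timeout, which is exactly what the new knob permits (the values are illustrative):

	#include <stdio.h>

	static int write_sysctl(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%s", val);
		return fclose(f);
	}

	int main(void)
	{
		/* warn for tasks stuck in D state longer than 120s,
		 * but only pay the cost of scanning every 600s
		 */
		write_sysctl("/proc/sys/kernel/hung_task_timeout_secs", "120");
		write_sysctl("/proc/sys/kernel/hung_task_check_interval_secs", "600");
		return 0;
	}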
@@ -451,7 +464,8 @@ Notes:
 1) kernel doesn't guarantee that new object will have desired id. So,
    it's up to userspace how to handle an object with "wrong" id.
 2) Toggle with non-default value will be set back to -1 by kernel after
-   successful IPC object allocation.
+   successful IPC object allocation. If an IPC object allocation syscall
+   fails, it is undefined if the value remains unmodified or is reset to -1.
 
 ==============================================================
 
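The next_id toggles live under /proc/sys/kernel (msg_next_id, sem_next_id, shm_next_id, available with CONFIG_CHECKPOINT_RESTORE). A sketch of the documented protocol, including the check that note 1) above makes mandatory; the id 4096 is illustrative:

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/msg.h>

	int main(void)
	{
		int id;
		FILE *f = fopen("/proc/sys/kernel/msg_next_id", "w");

		if (!f) {
			perror("fopen");	/* kernel built without the toggle? */
			return 1;
		}
		fprintf(f, "%d", 4096);		/* request the next msg id */
		fclose(f);

		id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
		/* the kernel does not guarantee the desired id; verify it */
		if (id != 4096)
			fprintf(stderr, "got id %d instead of 4096\n", id);
		return 0;
	}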
@@ -691,7 +691,7 @@ and don't use much of it.
 The default value is 0.
 
 See Documentation/vm/overcommit-accounting.rst and
-mm/mmap.c::__vm_enough_memory() for more information.
+mm/util.c::__vm_enough_memory() for more information.
 
 ==============================================================
 
@@ -841,6 +841,16 @@ config REFCOUNT_FULL
 	  against various use-after-free conditions that can be used in
 	  security flaw exploits.
 
+config HAVE_ARCH_PREL32_RELOCATIONS
+	bool
+	help
+	  May be selected by an architecture if it supports place-relative
+	  32-bit relocations, both in the toolchain and in the module loader,
+	  in which case relative references can be used in special sections
+	  for PCI fixup, initcalls etc which are only half the size on 64 bit
+	  architectures, and don't require runtime relocation on relocatable
+	  kernels.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
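What HAVE_ARCH_PREL32_RELOCATIONS enables can be shown in plain C: store a signed 32-bit offset relative to the entry itself instead of an absolute pointer, as the help text describes. A minimal user-space sketch (names invented for illustration; a build step, not runtime code, would normally emit the offset):

	#include <stdint.h>
	#include <stdio.h>

	static int target = 42;
	static int32_t target_ref;	/* 4 bytes instead of an 8-byte pointer */

	static void *deref_prel32(const int32_t *ref)
	{
		/* reconstruct the address from the reference's own location */
		return (char *)ref + *ref;
	}

	int main(void)
	{
		target_ref = (int32_t)((char *)&target - (char *)&target_ref);
		printf("%d\n", *(int *)deref_prel32(&target_ref));
		return 0;
	}

Because the offset is position-independent, such entries need no runtime relocation on relocatable kernels, which is the point of the Kconfig symbol.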
@@ -330,16 +330,15 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
  * atomic helpers. Insert it into the gate_vma so that it is visible
  * through ptrace and /proc/<pid>/mem.
  */
-static struct vm_area_struct gate_vma = {
-	.vm_start	= 0xffff0000,
-	.vm_end		= 0xffff0000 + PAGE_SIZE,
-	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-};
+static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+	gate_vma.vm_start = 0xffff0000;
+	gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+	gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
 	return 0;
 }
 arch_initcall(gate_vma_init);
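This ARM change is part of a series that stops initializing VMAs by hand. A sketch of what a vma_init()-style helper has to cover, and why the old designated initializer was fragile: an illustration of the idea, not the exact 4.19 implementation:

	/* Illustrative only: every static or on-stack VMA needs its mm
	 * pointer, a non-NULL vm_ops, and an initialized anon_vma_chain;
	 * the old designated-initializer gate_vma left all three unset.
	 */
	static inline void vma_init_sketch(struct vm_area_struct *vma,
					   struct mm_struct *mm)
	{
		static const struct vm_operations_struct dummy_vm_ops = {};

		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = mm;
		vma->vm_ops = &dummy_vm_ops;
		INIT_LIST_HEAD(&vma->anon_vma_chain);
	}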
@@ -108,6 +108,7 @@ config ARM64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_STACKLEAK
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
@@ -177,6 +177,7 @@ config PPC
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_CBPF_JIT if !PPC64
@@ -124,6 +124,7 @@ config X86
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
+	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
@@ -23,11 +23,8 @@
  * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
  * While both lib/ctype.c and lib/cmdline.c will bring EXPORT_SYMBOL
  * which is meaningless and will cause compiling error in some cases.
- * So do not include linux/export.h and define EXPORT_SYMBOL(sym)
- * as empty.
  */
-#define _LINUX_EXPORT_H
-#define EXPORT_SYMBOL(sym)
+#define __DISABLE_EXPORTS
 
 #include "misc.h"
 #include "error.h"
@@ -8,5 +8,6 @@ generated-y += xen-hypercalls.h
 
 generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
+generic-y += export.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_64BIT
-#define KSYM_ALIGN 16
-#endif
-#include <asm-generic/export.h>
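The deleted header and the new __DISABLE_EXPORTS define fit together: the export headers expand EXPORT_SYMBOL() into ksymtab entries, and defining the macro before including them asks for a no-op instead, so freestanding objects like the decompressor emit no export metadata. A sketch of the mechanism, not the verbatim kernel header:

	/* sketch: how an export header can honour __DISABLE_EXPORTS */
	#ifdef __DISABLE_EXPORTS
	#define EXPORT_SYMBOL(sym)			/* expands to nothing */
	#else
	#define EXPORT_SYMBOL(sym) ___EXPORT_SYMBOL(sym)	/* illustrative name */
	#endif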
@@ -7305,8 +7305,9 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
 
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-		unsigned long start, unsigned long end)
+int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end,
+		bool blockable)
 {
 	unsigned long apic_address;
 
@@ -7317,6 +7318,8 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
 	if (start <= apic_address && apic_address < end)
 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+
+	return 0;
 }
 
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
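Every driver converted in the rest of this merge follows the same shape: the invalidate-range callback gains a blockable flag and must not sleep when it is false, returning -EAGAIN instead so the caller (such as the OOM reaper) can back off. A user-space sketch of the idiom, with pthreads standing in for kernel mutexes and all names invented for illustration:

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>

	struct notifier_ctx {
		pthread_mutex_t lock;
	};

	/* blockable == false: we may not sleep, so trylock and back off */
	static int invalidate_range_start_sketch(struct notifier_ctx *ctx,
						 bool blockable)
	{
		if (blockable)
			pthread_mutex_lock(&ctx->lock);
		else if (pthread_mutex_trylock(&ctx->lock) != 0)
			return -EAGAIN;	/* caller retries or gives up */

		/* ... walk the tracked ranges and invalidate mappings ... */

		pthread_mutex_unlock(&ctx->lock);
		return 0;
	}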
@@ -337,6 +337,7 @@ static ssize_t backing_dev_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	char *file_name;
+	size_t sz;
 	struct file *backing_dev = NULL;
 	struct inode *inode;
 	struct address_space *mapping;
@@ -357,7 +358,11 @@ static ssize_t backing_dev_store(struct device *dev,
 		goto out;
 	}
 
-	strlcpy(file_name, buf, len);
+	strlcpy(file_name, buf, PATH_MAX);
+	/* ignore trailing newline */
+	sz = strlen(file_name);
+	if (sz > 0 && file_name[sz - 1] == '\n')
+		file_name[sz - 1] = 0x00;
 
 	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
 	if (IS_ERR(backing_dev)) {
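The zram bug was bounding the copy by the length of the sysfs write instead of the destination size; the fix bounds by PATH_MAX and strips the trailing newline explicitly. The same pattern in portable C, with snprintf standing in for strlcpy (which is not available in every libc):

	#include <limits.h>
	#include <stdio.h>
	#include <string.h>

	/* bounded copy, then drop one trailing newline if present */
	static void copy_file_name(char *dst, const char *src)
	{
		size_t sz;

		snprintf(dst, PATH_MAX, "%s", src);
		sz = strlen(dst);
		if (sz > 0 && dst[sz - 1] == '\n')
			dst[sz - 1] = '\0';
	}

	int main(void)
	{
		char name[PATH_MAX];

		copy_file_name(name, "/dev/sda3\n");
		printf("'%s'\n", name);		/* prints '/dev/sda3' */
		return 0;
	}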
@@ -24,6 +24,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
 				   -D__NO_FORTIFY \
 				   $(call cc-option,-ffreestanding) \
 				   $(call cc-option,-fno-stack-protector) \
+				   -D__DISABLE_EXPORTS
 
 GCOV_PROFILE			:= n
 KASAN_SANITIZE			:= n
@@ -178,12 +178,18 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
  *
  * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
-	mutex_lock(&amn->read_lock);
+	if (blockable)
+		mutex_lock(&amn->read_lock);
+	else if (!mutex_trylock(&amn->read_lock))
+		return -EAGAIN;
+
 	if (atomic_inc_return(&amn->recursion) == 1)
 		down_read_non_owner(&amn->lock);
 	mutex_unlock(&amn->read_lock);
+
+	return 0;
 }
 
 /**
@@ -239,10 +245,11 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 						 struct mm_struct *mm,
 						 unsigned long start,
-						 unsigned long end)
+						 unsigned long end,
+						 bool blockable)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -250,17 +257,28 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(amn);
+	/* TODO we should be able to split locking for interval tree and
+	 * amdgpu_mn_invalidate_node
+	 */
+	if (amdgpu_mn_read_lock(amn, blockable))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
+		if (!blockable) {
+			amdgpu_mn_read_unlock(amn);
+			return -EAGAIN;
+		}
+
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+
+	return 0;
 }
 
 /**
@@ -275,10 +293,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restored in amdgpu_mn_invalidate_range_end_hsa.
  */
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 						 struct mm_struct *mm,
 						 unsigned long start,
-						 unsigned long end)
+						 unsigned long end,
+						 bool blockable)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
@@ -286,13 +305,19 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(amn);
+	if (amdgpu_mn_read_lock(amn, blockable))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
+		if (!blockable) {
+			amdgpu_mn_read_unlock(amn);
+			return -EAGAIN;
+		}
+
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
@@ -304,6 +329,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 			amdgpu_amdkfd_evict_userptr(mem, mm);
 		}
 	}
+
+	return 0;
 }
 
 /**
@@ -112,10 +112,11 @@ static void del_object(struct i915_mmu_object *mo)
 	mo->attached = false;
 }
 
-static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 						       struct mm_struct *mm,
 						       unsigned long start,
-						       unsigned long end)
+						       unsigned long end,
+						       bool blockable)
 {
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
@@ -124,7 +125,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	LIST_HEAD(cancelled);
 
 	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
-		return;
+		return 0;
 
 	/* interval ranges are inclusive, but invalidate range is exclusive */
 	end--;
@@ -132,6 +133,10 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	spin_lock(&mn->lock);
 	it = interval_tree_iter_first(&mn->objects, start, end);
 	while (it) {
+		if (!blockable) {
+			spin_unlock(&mn->lock);
+			return -EAGAIN;
+		}
 		/* The mmu_object is released late when destroying the
 		 * GEM object so it is entirely possible to gain a
 		 * reference on an object in the process of being freed
@@ -154,6 +159,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 
 	if (!list_empty(&cancelled))
 		flush_workqueue(mn->wq);
+
+	return 0;
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -118,19 +118,27 @@ static void radeon_mn_release(struct mmu_notifier *mn,
  * We block for all BOs between start and end to be idle and
  * unmap them by moving them into system domain again.
  */
-static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 					     struct mm_struct *mm,
 					     unsigned long start,
-					     unsigned long end)
+					     unsigned long end,
+					     bool blockable)
 {
 	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
 	struct ttm_operation_ctx ctx = { false, false };
 	struct interval_tree_node *it;
+	int ret = 0;
 
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
+	/* TODO we should be able to split locking for interval tree and
+	 * the tear down.
+	 */
+	if (blockable)
+		mutex_lock(&rmn->lock);
+	else if (!mutex_trylock(&rmn->lock))
+		return -EAGAIN;
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -138,6 +146,11 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 		struct radeon_bo *bo;
 		long r;
 
+		if (!blockable) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
+
 		node = container_of(it, struct radeon_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
@@ -166,7 +179,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 		}
 	}
 
+out_unlock:
 	mutex_unlock(&rmn->lock);
+
+	return ret;
 }
 
 static const struct mmu_notifier_ops radeon_mn_ops = {
@@ -186,6 +186,7 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
 	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
 				      ULLONG_MAX,
 				      ib_umem_notifier_release_trampoline,
+				      true,
 				      NULL);
 	up_read(&context->umem_rwsem);
 }
@@ -207,22 +208,31 @@ static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
 	return 0;
 }
 
-static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 						    struct mm_struct *mm,
 						    unsigned long start,
-						    unsigned long end)
+						    unsigned long end,
+						    bool blockable)
 {
 	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+	int ret;
 
 	if (!context->invalidate_range)
-		return;
+		return 0;
 
+	if (blockable)
+		down_read(&context->umem_rwsem);
+	else if (!down_read_trylock(&context->umem_rwsem))
+		return -EAGAIN;
+
 	ib_ucontext_notifier_start_account(context);
-	down_read(&context->umem_rwsem);
-	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+	ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
 				      end,
-				      invalidate_range_start_trampoline, NULL);
+				      invalidate_range_start_trampoline,
+				      blockable, NULL);
 	up_read(&context->umem_rwsem);
+
+	return ret;
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
@@ -242,10 +252,15 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	if (!context->invalidate_range)
 		return;
 
+	/*
+	 * TODO: we currently bail out if there is any sleepable work to be done
+	 * in ib_umem_notifier_invalidate_range_start so we shouldn't really block
+	 * here. But this is ugly and fragile.
+	 */
 	down_read(&context->umem_rwsem);
 	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
 				      end,
-				      invalidate_range_end_trampoline, NULL);
+				      invalidate_range_end_trampoline, true, NULL);
 	up_read(&context->umem_rwsem);
 	ib_ucontext_notifier_end_account(context);
 }
@@ -798,6 +813,7 @@ EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
 int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 				  u64 start, u64 last,
 				  umem_call_back cb,
+				  bool blockable,
 				  void *cookie)
 {
 	int ret_val = 0;
@@ -809,6 +825,9 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 
 	for (node = rbt_ib_umem_iter_first(root, start, last - 1);
 			node; node = next) {
+		/* TODO move the blockable decision up to the callback */
+		if (!blockable)
+			return -EAGAIN;
 		next = rbt_ib_umem_iter_next(node, start, last - 1);
 		umem = container_of(node, struct ib_umem_odp, interval_tree);
 		ret_val = cb(umem->umem, start, last, cookie) || ret_val;
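The TODO in the loop above flags the coarse design: the walk refuses to do any work when it cannot block, rather than letting each callback decide. A reduced sketch of the shape this helper now has, with illustrative types rather than the RDMA API:

	#include <errno.h>
	#include <stdbool.h>

	typedef int (*range_cb)(void *item, unsigned long start,
				unsigned long last, void *cookie);

	static int for_each_in_range_sketch(void **items, int n,
					    unsigned long start,
					    unsigned long last,
					    range_cb cb, bool blockable,
					    void *cookie)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			/* callbacks may sleep, so a non-blockable walk bails
			 * as soon as it finds anything to process
			 */
			if (!blockable)
				return -EAGAIN;
			ret = cb(items[i], start, last, cookie) || ret;
		}
		return ret;
	}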
Some files were not shown because too many files have changed in this diff.