Mirror of https://github.com/armbian/linux-rockchip.git
lib/interval_tree: fast overlap detection
Allow interval trees to quickly check for overlaps to avoid unnecessary tree lookups in interval_tree_iter_first(). As of this patch, all interval tree flavors will require using a 'rb_root_cached' such that we can have the leftmost node easily available. While most users will make use of this feature, those with special functions (in addition to the generic insert, delete, search calls) will avoid using the cached option as they can do funky things with insertions -- for example, vma_interval_tree_insert_after().

[jglisse@redhat.com: fix deadlock from typo vm_lock_anon_vma()]
Link: http://lkml.kernel.org/r/20170808225719.20723-1-jglisse@redhat.com
Link: http://lkml.kernel.org/r/20170719014603.19029-12-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Doug Ledford <dledford@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 09663c86e2
commit f808c13fd3
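The diff below is easier to follow with the API change in mind. As a minimal illustrative sketch (not part of this commit; 'my_tree', 'my_track_range' and 'my_range_busy' are made-up names), this is roughly what the conversion looks like for a user of the generic interval tree: the root type becomes 'struct rb_root_cached', initializers become RB_ROOT_CACHED, and interval_tree_iter_first() takes the cached root, which lets it reject a non-overlapping query by consulting the cached leftmost node instead of walking the tree.

#include <linux/interval_tree.h>
#include <linux/rbtree.h>

/* Hypothetical user of the generic interval tree after this patch. */
static struct rb_root_cached my_tree = RB_ROOT_CACHED;	/* was: struct rb_root ... = RB_ROOT; */

static void my_track_range(struct interval_tree_node *node)
{
	/* insert/remove now take the cached root */
	interval_tree_insert(node, &my_tree);
}

static bool my_range_busy(unsigned long start, unsigned long last)
{
	/*
	 * interval_tree_iter_first() now takes a struct rb_root_cached *;
	 * with the leftmost node cached, a query that lies entirely before
	 * the lowest interval in the tree is rejected without a tree walk.
	 */
	return interval_tree_iter_first(&my_tree, start, last) != NULL;
}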
@@ -51,7 +51,7 @@ struct amdgpu_mn {
 
 	/* objects protected by lock */
 	struct mutex		lock;
-	struct rb_root		objects;
+	struct rb_root_cached	objects;
 };
 
 struct amdgpu_mn_node {
@@ -76,8 +76,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	mutex_lock(&adev->mn_lock);
 	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
-					     it.rb) {
+	rbtree_postorder_for_each_entry_safe(node, next_node,
+					     &rmn->objects.rb_root, it.rb) {
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 			list_del_init(&bo->mn_list);
@@ -221,7 +221,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
 	mutex_init(&rmn->lock);
-	rmn->objects = RB_ROOT;
+	rmn->objects = RB_ROOT_CACHED;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
@@ -2475,7 +2475,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	u64 flags;
 	uint64_t init_pde_value = 0;
 
-	vm->va = RB_ROOT;
+	vm->va = RB_ROOT_CACHED;
 	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
@@ -2596,10 +2596,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
 
-	if (!RB_EMPTY_ROOT(&vm->va)) {
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
 	}
-	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
+	rbtree_postorder_for_each_entry_safe(mapping, tmp,
+					     &vm->va.rb_root, rb) {
 		list_del(&mapping->list);
 		amdgpu_vm_it_remove(mapping, &vm->va);
 		kfree(mapping);
@@ -118,7 +118,7 @@ struct amdgpu_vm_pt {
 
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
-	struct rb_root		va;
+	struct rb_root_cached	va;
 
 	/* protecting invalidated */
 	spinlock_t		status_lock;
@@ -169,7 +169,7 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
 struct drm_mm_node *
 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
 {
-	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
+	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
 					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
 }
 EXPORT_SYMBOL(__drm_mm_interval_first);
@@ -180,6 +180,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 	struct drm_mm *mm = hole_node->mm;
 	struct rb_node **link, *rb;
 	struct drm_mm_node *parent;
+	bool leftmost = true;
 
 	node->__subtree_last = LAST(node);
 
@@ -196,9 +197,10 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 
 		rb = &hole_node->rb;
 		link = &hole_node->rb.rb_right;
+		leftmost = false;
 	} else {
 		rb = NULL;
-		link = &mm->interval_tree.rb_node;
+		link = &mm->interval_tree.rb_root.rb_node;
 	}
 
 	while (*link) {
@@ -208,14 +210,15 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 			parent->__subtree_last = node->__subtree_last;
 		if (node->start < parent->start)
 			link = &parent->rb.rb_left;
-		else
+		else {
 			link = &parent->rb.rb_right;
+			leftmost = true;
+		}
 	}
 
 	rb_link_node(&node->rb, rb, link);
-	rb_insert_augmented(&node->rb,
-			    &mm->interval_tree,
-			    &drm_mm_interval_tree_augment);
+	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
+				   &drm_mm_interval_tree_augment);
 }
 
 #define RB_INSERT(root, member, expr) do { \
@@ -577,7 +580,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 	*new = *old;
 
 	list_replace(&old->node_list, &new->node_list);
-	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
+	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
 
 	if (drm_mm_hole_follows(old)) {
 		list_replace(&old->hole_stack, &new->hole_stack);
@@ -863,7 +866,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 	mm->color_adjust = NULL;
 
 	INIT_LIST_HEAD(&mm->hole_stack);
-	mm->interval_tree = RB_ROOT;
+	mm->interval_tree = RB_ROOT_CACHED;
 	mm->holes_size = RB_ROOT;
 	mm->holes_addr = RB_ROOT;
 
@@ -147,7 +147,7 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
 	struct rb_node *iter;
 	unsigned long offset;
 
-	iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
+	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
 	best = NULL;
 
 	while (likely(iter)) {
@@ -49,7 +49,7 @@ struct i915_mmu_notifier {
 	spinlock_t lock;
 	struct hlist_node node;
 	struct mmu_notifier mn;
-	struct rb_root objects;
+	struct rb_root_cached objects;
 	struct workqueue_struct *wq;
 };
 
@@ -123,7 +123,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	struct interval_tree_node *it;
 	LIST_HEAD(cancelled);
 
-	if (RB_EMPTY_ROOT(&mn->objects))
+	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
 		return;
 
 	/* interval ranges are inclusive, but invalidate range is exclusive */
@@ -172,7 +172,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
-	mn->objects = RB_ROOT;
+	mn->objects = RB_ROOT_CACHED;
 	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
 	if (mn->wq == NULL) {
 		kfree(mn);
@@ -924,7 +924,7 @@ struct radeon_vm_id {
 struct radeon_vm {
 	struct mutex		mutex;
 
-	struct rb_root		va;
+	struct rb_root_cached	va;
 
 	/* protecting invalidated and freed */
 	spinlock_t		status_lock;
@@ -50,7 +50,7 @@ struct radeon_mn {
 
 	/* objects protected by lock */
 	struct mutex		lock;
-	struct rb_root		objects;
+	struct rb_root_cached	objects;
 };
 
 struct radeon_mn_node {
@@ -75,8 +75,8 @@ static void radeon_mn_destroy(struct work_struct *work)
 	mutex_lock(&rdev->mn_lock);
 	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
-					     it.rb) {
+	rbtree_postorder_for_each_entry_safe(node, next_node,
+					     &rmn->objects.rb_root, it.rb) {
 
 		interval_tree_remove(&node->it, &rmn->objects);
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
@@ -205,7 +205,7 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
 	rmn->mm = mm;
 	rmn->mn.ops = &radeon_mn_ops;
 	mutex_init(&rmn->lock);
-	rmn->objects = RB_ROOT;
+	rmn->objects = RB_ROOT_CACHED;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
@@ -1185,7 +1185,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 		vm->ids[i].last_id_use = NULL;
 	}
 	mutex_init(&vm->mutex);
-	vm->va = RB_ROOT;
+	vm->va = RB_ROOT_CACHED;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->freed);
@@ -1232,10 +1232,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	struct radeon_bo_va *bo_va, *tmp;
 	int i, r;
 
-	if (!RB_EMPTY_ROOT(&vm->va)) {
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(rdev->dev, "still active bo inside vm\n");
 	}
-	rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
+					     &vm->va.rb_root, it.rb) {
 		interval_tree_remove(&bo_va->it, &vm->va);
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
@@ -72,7 +72,7 @@ INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
 /* @last is not a part of the interval. See comment for function
  * node_last.
  */
-int rbt_ib_umem_for_each_in_range(struct rb_root *root,
+int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 				  u64 start, u64 last,
 				  umem_call_back cb,
 				  void *cookie)
@@ -95,7 +95,7 @@ int rbt_ib_umem_for_each_in_range(struct rb_root *root,
 }
 EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
 
-struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root,
+struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
 				       u64 addr, u64 length)
 {
 	struct umem_odp_node *node;
 
@@ -118,7 +118,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	ucontext->closing = 0;
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	ucontext->umem_tree = RB_ROOT;
+	ucontext->umem_tree = RB_ROOT_CACHED;
 	init_rwsem(&ucontext->umem_rwsem);
 	ucontext->odp_mrs_count = 0;
 	INIT_LIST_HEAD(&ucontext->no_private_counters);
@@ -54,7 +54,7 @@
 
 struct mmu_rb_handler {
 	struct mmu_notifier mn;
-	struct rb_root root;
+	struct rb_root_cached root;
 	void *ops_arg;
 	spinlock_t lock; /* protect the RB tree */
 	struct mmu_rb_ops *ops;
@@ -108,7 +108,7 @@ int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
 	if (!handlr)
 		return -ENOMEM;
 
-	handlr->root = RB_ROOT;
+	handlr->root = RB_ROOT_CACHED;
 	handlr->ops = ops;
 	handlr->ops_arg = ops_arg;
 	INIT_HLIST_NODE(&handlr->mn.hlist);
@@ -149,9 +149,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
 	INIT_LIST_HEAD(&del_list);
 
 	spin_lock_irqsave(&handler->lock, flags);
-	while ((node = rb_first(&handler->root))) {
+	while ((node = rb_first_cached(&handler->root))) {
 		rbnode = rb_entry(node, struct mmu_rb_node, node);
-		rb_erase(node, &handler->root);
+		rb_erase_cached(node, &handler->root);
 		/* move from LRU list to delete list */
 		list_move(&rbnode->list, &del_list);
 	}
@@ -300,7 +300,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
 {
 	struct mmu_rb_handler *handler =
 		container_of(mn, struct mmu_rb_handler, mn);
-	struct rb_root *root = &handler->root;
+	struct rb_root_cached *root = &handler->root;
 	struct mmu_rb_node *node, *ptr = NULL;
 	unsigned long flags;
 	bool added = false;
@@ -227,7 +227,7 @@ static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
 	vpn_last = vpn_start + npages - 1;
 
 	spin_lock(&pd->lock);
-	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
+	usnic_uiom_remove_interval(&pd->root, vpn_start,
 					vpn_last, &rm_intervals);
 	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
 
@@ -379,7 +379,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
 	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
 					(writable) ? IOMMU_WRITE : 0,
 					IOMMU_WRITE,
-					&pd->rb_root,
+					&pd->root,
 					&sorted_diff_intervals);
 	if (err) {
 		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
@@ -395,7 +395,7 @@
 
 	}
 
-	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
+	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
 					(writable) ? IOMMU_WRITE : 0);
 	if (err) {
 		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
@@ -55,7 +55,7 @@ struct usnic_uiom_dev {
 struct usnic_uiom_pd {
 	struct iommu_domain *domain;
 	spinlock_t lock;
-	struct rb_root rb_root;
+	struct rb_root_cached root;
 	struct list_head devs;
 	int dev_cnt;
 };
@@ -100,9 +100,9 @@ static int interval_cmp(void *priv, struct list_head *a, struct list_head *b)
 }
 
 static void
-find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
-				   unsigned long last,
-				   struct list_head *list)
+find_intervals_intersection_sorted(struct rb_root_cached *root,
+				   unsigned long start, unsigned long last,
+				   struct list_head *list)
 {
 	struct usnic_uiom_interval_node *node;
 
@@ -118,7 +118,7 @@ find_intervals_intersection_sorted(struct rb_root *root, unsigned long start,
 
 int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last,
 				  int flags, int flag_mask,
-				  struct rb_root *root,
+				  struct rb_root_cached *root,
 				  struct list_head *diff_set)
 {
 	struct usnic_uiom_interval_node *interval, *tmp;
@@ -175,7 +175,7 @@ void usnic_uiom_put_interval_set(struct list_head *intervals)
 		kfree(interval);
 }
 
-int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start,
+int usnic_uiom_insert_interval(struct rb_root_cached *root, unsigned long start,
 				unsigned long last, int flags)
 {
 	struct usnic_uiom_interval_node *interval, *tmp;
@@ -246,8 +246,9 @@ err_out:
 	return err;
 }
 
-void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
-				unsigned long last, struct list_head *removed)
+void usnic_uiom_remove_interval(struct rb_root_cached *root,
+				unsigned long start, unsigned long last,
+				struct list_head *removed)
 {
 	struct usnic_uiom_interval_node *interval;
 
@@ -48,12 +48,12 @@ struct usnic_uiom_interval_node {
 
 extern void
 usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node,
-					struct rb_root *root);
+					struct rb_root_cached *root);
 extern void
 usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
-					struct rb_root *root);
+					struct rb_root_cached *root);
 extern struct usnic_uiom_interval_node *
-usnic_uiom_interval_tree_iter_first(struct rb_root *root,
+usnic_uiom_interval_tree_iter_first(struct rb_root_cached *root,
 					unsigned long start,
 					unsigned long last);
 extern struct usnic_uiom_interval_node *
@@ -63,7 +63,7 @@ usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node,
  * Inserts {start...last} into {root}. If there are overlaps,
  * nodes will be broken up and merged
  */
-int usnic_uiom_insert_interval(struct rb_root *root,
+int usnic_uiom_insert_interval(struct rb_root_cached *root,
 				unsigned long start, unsigned long last,
 				int flags);
 /*
@@ -71,7 +71,7 @@ int usnic_uiom_insert_interval(struct rb_root *root,
  * 'removed.' The caller is responsibile for freeing memory of nodes in
  * 'removed.'
 */
-void usnic_uiom_remove_interval(struct rb_root *root,
+void usnic_uiom_remove_interval(struct rb_root_cached *root,
 				unsigned long start, unsigned long last,
 				struct list_head *removed);
 /*
@@ -81,7 +81,7 @@ void usnic_uiom_remove_interval(struct rb_root *root,
 int usnic_uiom_get_intervals_diff(unsigned long start,
 					unsigned long last, int flags,
 					int flag_mask,
-					struct rb_root *root,
+					struct rb_root_cached *root,
 					struct list_head *diff_set);
 /* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */
 void usnic_uiom_put_interval_set(struct list_head *intervals);
@@ -1271,7 +1271,7 @@ static struct vhost_umem *vhost_umem_alloc(void)
 	if (!umem)
 		return NULL;
 
-	umem->umem_tree = RB_ROOT;
+	umem->umem_tree = RB_ROOT_CACHED;
 	umem->numem = 0;
 	INIT_LIST_HEAD(&umem->umem_list);
 
@@ -71,7 +71,7 @@ struct vhost_umem_node {
 };
 
 struct vhost_umem {
-	struct rb_root umem_tree;
+	struct rb_root_cached umem_tree;
 	struct list_head umem_list;
 	int numem;
 };
@@ -334,7 +334,7 @@ static void remove_huge_page(struct page *page)
 }
 
 static void
-hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
+hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
 {
 	struct vm_area_struct *vma;
 
@@ -498,7 +498,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 
 	i_size_write(inode, offset);
 	i_mmap_lock_write(mapping);
-	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
+	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
 		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
 	i_mmap_unlock_write(mapping);
 	remove_inode_hugepages(inode, offset, LLONG_MAX);
@@ -523,7 +523,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 		inode_lock(inode);
 		i_mmap_lock_write(mapping);
-		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
+		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
 			hugetlb_vmdelete_list(&mapping->i_mmap,
 						hole_start >> PAGE_SHIFT,
 						hole_end >> PAGE_SHIFT);
@@ -353,7 +353,7 @@ void address_space_init_once(struct address_space *mapping)
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->private_list);
 	spin_lock_init(&mapping->private_lock);
-	mapping->i_mmap = RB_ROOT;
+	mapping->i_mmap = RB_ROOT_CACHED;
 }
 EXPORT_SYMBOL(address_space_init_once);
Some files were not shown because too many files have changed in this diff.