mirror of
https://github.com/armbian/linux.git
synced 2026-01-06 10:13:00 -08:00
mm: fix crashes from mbind() merging vmas
commit d05f0cdcbe upstream. In v2.6.34 commit 9d8cebd4bc ("mm: fix mbind vma merge problem") introduced vma merging to mbind(), but it should have also changed the convention of passing start vma from queue_pages_range() (formerly check_range()) to new_vma_page(): vma merging may have already freed that structure, resulting in BUG at mm/mempolicy.c:1738 and probably worse crashes. Fixes: 9d8cebd4bc ("mm: fix mbind vma merge problem") Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Signed-off-by: Hugh Dickins <hughd@google.com> Acked-by: Christoph Lameter <cl@linux.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
9b576da0f7
commit
f76d0efeb6
@@ -608,19 +608,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
|
||||
* If pagelist != NULL then isolate pages from the LRU and
|
||||
* put them on the pagelist.
|
||||
*/
|
||||
static struct vm_area_struct *
|
||||
static int
|
||||
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
|
||||
const nodemask_t *nodes, unsigned long flags, void *private)
|
||||
{
|
||||
int err;
|
||||
struct vm_area_struct *first, *vma, *prev;
|
||||
int err = 0;
|
||||
struct vm_area_struct *vma, *prev;
|
||||
|
||||
|
||||
first = find_vma(mm, start);
|
||||
if (!first)
|
||||
return ERR_PTR(-EFAULT);
|
||||
vma = find_vma(mm, start);
|
||||
if (!vma)
|
||||
return -EFAULT;
|
||||
prev = NULL;
|
||||
for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
|
||||
for (; vma && vma->vm_start < end; vma = vma->vm_next) {
|
||||
unsigned long endvma = vma->vm_end;
|
||||
|
||||
if (endvma > end)
|
||||
@@ -630,9 +629,9 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
|
||||
|
||||
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
|
||||
if (!vma->vm_next && vma->vm_end < end)
|
||||
return ERR_PTR(-EFAULT);
|
||||
return -EFAULT;
|
||||
if (prev && prev->vm_end < vma->vm_start)
|
||||
return ERR_PTR(-EFAULT);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (is_vm_hugetlb_page(vma))
|
||||
@@ -649,15 +648,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
|
||||
|
||||
err = check_pgd_range(vma, start, endvma, nodes,
|
||||
flags, private);
|
||||
if (err) {
|
||||
first = ERR_PTR(err);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
}
|
||||
next:
|
||||
prev = vma;
|
||||
}
|
||||
return first;
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1138,16 +1135,17 @@ out:
|
||||
|
||||
/*
|
||||
* Allocate a new page for page migration based on vma policy.
|
||||
* Start assuming that page is mapped by vma pointed to by @private.
|
||||
* Start by assuming the page is mapped by the same vma as contains @start.
|
||||
* Search forward from there, if not. N.B., this assumes that the
|
||||
* list of pages handed to migrate_pages()--which is how we get here--
|
||||
* is in virtual address order.
|
||||
*/
|
||||
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
|
||||
static struct page *new_page(struct page *page, unsigned long start, int **x)
|
||||
{
|
||||
struct vm_area_struct *vma = (struct vm_area_struct *)private;
|
||||
struct vm_area_struct *vma;
|
||||
unsigned long uninitialized_var(address);
|
||||
|
||||
vma = find_vma(current->mm, start);
|
||||
while (vma) {
|
||||
address = page_address_in_vma(page, vma);
|
||||
if (address != -EFAULT)
|
||||
@@ -1173,7 +1171,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
|
||||
static struct page *new_page(struct page *page, unsigned long start, int **x)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
@@ -1183,7 +1181,6 @@ static long do_mbind(unsigned long start, unsigned long len,
|
||||
unsigned short mode, unsigned short mode_flags,
|
||||
nodemask_t *nmask, unsigned long flags)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
struct mm_struct *mm = current->mm;
|
||||
struct mempolicy *new;
|
||||
unsigned long end;
|
||||
@@ -1249,11 +1246,9 @@ static long do_mbind(unsigned long start, unsigned long len,
|
||||
if (err)
|
||||
goto mpol_out;
|
||||
|
||||
vma = check_range(mm, start, end, nmask,
|
||||
err = check_range(mm, start, end, nmask,
|
||||
flags | MPOL_MF_INVERT, &pagelist);
|
||||
|
||||
err = PTR_ERR(vma); /* maybe ... */
|
||||
if (!IS_ERR(vma))
|
||||
if (!err)
|
||||
err = mbind_range(mm, start, end, new);
|
||||
|
||||
if (!err) {
|
||||
@@ -1261,9 +1256,8 @@ static long do_mbind(unsigned long start, unsigned long len,
|
||||
|
||||
if (!list_empty(&pagelist)) {
|
||||
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
|
||||
nr_failed = migrate_pages(&pagelist, new_vma_page,
|
||||
(unsigned long)vma,
|
||||
MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
|
||||
nr_failed = migrate_pages(&pagelist, new_page,
|
||||
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
|
||||
if (nr_failed)
|
||||
putback_lru_pages(&pagelist);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user