aio/migratepages: make aio migrate pages sane

The arbitrary restriction on page counts offered by the core
migrate_page_move_mapping() code results in rather suspicious-looking
fiddling with page reference counts in the aio_migratepage() operation.
To fix this, make migrate_page_move_mapping() take an extra_count parameter
that allows aio to tell the code about its own reference count on the page
being migrated.

While cleaning up aio_migratepage(), make it validate that the old page
being passed in is actually what aio_migratepage() expects to prevent
misbehaviour in the case of races.

Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Author: Benjamin LaHaise
Date: 2013-12-21 17:56:08 -05:00
parent 1881686f84
commit 8e321fefb0
3 changed files with 53 additions and 15 deletions
+44 -8
View File
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
int i;
for (i = 0; i < ctx->nr_pages; i++) {
struct page *page;
pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
page_count(ctx->ring_pages[i]));
put_page(ctx->ring_pages[i]);
page = ctx->ring_pages[i];
if (!page)
continue;
ctx->ring_pages[i] = NULL;
put_page(page);
}
put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
unsigned long flags;
int rc;
rc = 0;
/* Make sure the old page hasn't already been changed */
spin_lock(&mapping->private_lock);
ctx = mapping->private_data;
if (ctx) {
pgoff_t idx;
spin_lock_irqsave(&ctx->completion_lock, flags);
idx = old->index;
if (idx < (pgoff_t)ctx->nr_pages) {
if (ctx->ring_pages[idx] != old)
rc = -EAGAIN;
} else
rc = -EINVAL;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
} else
rc = -EINVAL;
spin_unlock(&mapping->private_lock);
if (rc != 0)
return rc;
/* Writeback must be complete */
BUG_ON(PageWriteback(old));
put_page(old);
get_page(new);
rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
if (rc != MIGRATEPAGE_SUCCESS) {
get_page(old);
put_page(new);
return rc;
}
get_page(new);
/* We can potentially race against kioctx teardown here. Use the
* address_space's private data lock to protect the mapping's
* private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
spin_lock_irqsave(&ctx->completion_lock, flags);
migrate_page_copy(new, old);
idx = old->index;
if (idx < (pgoff_t)ctx->nr_pages)
ctx->ring_pages[idx] = new;
if (idx < (pgoff_t)ctx->nr_pages) {
/* And only do the move if things haven't changed */
if (ctx->ring_pages[idx] == old)
ctx->ring_pages[idx] = new;
else
rc = -EAGAIN;
} else
rc = -EINVAL;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
} else
rc = -EBUSY;
spin_unlock(&mapping->private_lock);
if (rc == MIGRATEPAGE_SUCCESS)
put_page(old);
else
put_page(new);
return rc;
}
#endif