Mirror of https://github.com/linux-apfs/linux-apfs.git (synced 2026-05-01 15:00:59 -07:00)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few misc things

 - most of MM

 - KASAN updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (102 commits)
  kasan: separate report parts by empty lines
  kasan: improve double-free report format
  kasan: print page description after stacks
  kasan: improve slab object description
  kasan: change report header
  kasan: simplify address description logic
  kasan: change allocation and freeing stack traces headers
  kasan: unify report headers
  kasan: introduce helper functions for determining bug type
  mm: hwpoison: call shake_page() after try_to_unmap() for mlocked page
  mm: hwpoison: call shake_page() unconditionally
  mm/swapfile.c: fix swap space leak in error path of swap_free_entries()
  mm/gup.c: fix access_ok() argument type
  mm/truncate: avoid pointless cleancache_invalidate_inode() calls.
  mm/truncate: bail out early from invalidate_inode_pages2_range() if mapping is empty
  fs/block_dev: always invalidate cleancache in invalidate_bdev()
  fs: fix data invalidation in the cleancache during direct IO
  zram: reduce load operation in page_same_filled
  zram: use zram_free_page instead of open-coded
  zram: introduce zram data accessor
  ...
@@ -871,6 +871,11 @@ PAGE_SIZE multiple when read back.
 
          Amount of memory used in network transmission buffers
 
+       shmem
+
+         Amount of cached filesystem data that is swap-backed,
+         such as tmpfs, shm segments, shared anonymous mmap()s
+
        file_mapped
 
          Amount of cached filesystem data mapped with mmap()
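The new "shmem" key lands next to the existing entries in memory.stat, so it can be read with the same key/value parsing as the rest of the file. A minimal userspace sketch, not part of the patch; the cgroup path "/sys/fs/cgroup/mygroup" is an assumption for illustration:

/* read the shmem and file_mapped keys from a cgroup v2 memory.stat */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/sys/fs/cgroup/mygroup/memory.stat", "r");
        char key[64];
        unsigned long long val;

        if (!f) {
                perror("memory.stat");
                return 1;
        }
        while (fscanf(f, "%63s %llu", key, &val) == 2) {
                if (!strcmp(key, "shmem") || !strcmp(key, "file_mapped"))
                        printf("%s = %llu bytes\n", key, val);
        }
        fclose(f);
        return 0;
}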
@@ -413,6 +413,7 @@ Private_Clean:         0 kB
 Private_Dirty:         0 kB
 Referenced:          892 kB
 Anonymous:             0 kB
+LazyFree:              0 kB
 AnonHugePages:         0 kB
 ShmemPmdMapped:        0 kB
 Shared_Hugetlb:        0 kB
@@ -442,6 +443,11 @@ accessed.
 "Anonymous" shows the amount of memory that does not belong to any file. Even
 a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
 and a page is modified, the file page is replaced by a private anonymous copy.
+"LazyFree" shows the amount of memory which is marked by madvise(MADV_FREE).
+The memory isn't freed immediately with madvise(). It's freed under memory
+pressure if the memory is clean. Please note that the printed value might
+be lower than the real value due to optimizations used in the current
+implementation. If this is not desirable please file a bug report.
 "AnonHugePages" shows the amount of memory backed by transparent hugepage.
 "ShmemPmdMapped" shows the amount of shared (shmem/tmpfs) memory backed by
 huge pages.
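For context, a minimal userspace sketch of how memory ends up in this bucket (illustrative, not from the patch): after madvise(MADV_FREE) the pages stay mapped but become lazily reclaimable, and are reported under the new "LazyFree:" line in /proc/<pid>/smaps until they are reused or reclaimed. MADV_FREE needs Linux 4.5+ and a libc that defines the constant.

#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 << 20;  /* 16 MiB */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        memset(p, 0xaa, len);           /* fault the pages in */
        if (madvise(p, len, MADV_FREE)) /* mark them lazily freeable */
                return 1;
        /* pages are reclaimed only under pressure; a write cancels the hint */
        return 0;
}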
@@ -12,6 +12,8 @@ highmem.txt
 	- Outline of highmem and common issues.
 hugetlbpage.txt
 	- a brief summary of hugetlbpage support in the Linux kernel.
+hugetlbfs_reserv.txt
+	- A brief overview of hugetlbfs reservation design/implementation.
 hwpoison.txt
 	- explains what hwpoison is
 idle_page_tracking.txt

[File diff suppressed because it is too large]
@@ -97,6 +97,9 @@ EXPORT_SYMBOL(clk_enable);
 
 void clk_disable(struct clk *clk)
 {
+	if (!clk)
+		return;
+
 	if (clk->ops && clk->ops->disable)
 		clk->ops->disable(clk);
 }
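The added NULL check follows the same convention as kfree(NULL): a caller holding an optional clock no longer needs to guard the call itself. A hedged sketch of the caller-side simplification; the "priv" structure and shutdown path are illustrative, not from the patch:

struct clk;
void clk_disable(struct clk *clk);

struct priv {
        struct clk *optional_clk;       /* may legitimately be NULL */
};

static void priv_shutdown(struct priv *p)
{
        /* before: if (p->optional_clk) clk_disable(p->optional_clk); */
        clk_disable(p->optional_clk);   /* now safe even when NULL */
}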
+296 -303 [File diff suppressed because it is too large]
@@ -92,13 +92,9 @@ struct zram_stats {
 	atomic64_t writestall;	/* no. of write slow paths */
 };
 
-struct zram_meta {
+struct zram {
 	struct zram_table_entry *table;
 	struct zs_pool *mem_pool;
-};
-
-struct zram {
-	struct zram_meta *meta;
 	struct zcomp *comp;
 	struct gendisk *disk;
 	/* Prevent concurrent execution of device init */
+1 -1

@@ -372,7 +372,7 @@ static void moom_callback(struct work_struct *ignored)
 
 	mutex_lock(&oom_lock);
 	if (!out_of_memory(&oc))
-		pr_info("OOM request ignored because killer is disabled\n");
+		pr_info("OOM request ignored. No task eligible\n");
 	mutex_unlock(&oom_lock);
 }
 
+5 -6

@@ -103,12 +103,11 @@ void invalidate_bdev(struct block_device *bdev)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-	if (mapping->nrpages == 0)
-		return;
-
-	invalidate_bh_lrus();
-	lru_add_drain_all();	/* make sure all lru add caches are flushed */
-	invalidate_mapping_pages(mapping, 0, -1);
+	if (mapping->nrpages) {
+		invalidate_bh_lrus();
+		lru_add_drain_all();	/* make sure all lru add caches are flushed */
+		invalidate_mapping_pages(mapping, 0, -1);
+	}
 	/* 99% of the time, we don't need to flush the cleancache on the bdev.
 	 * But, for the strange corners, let's be cautious
 	 */
+8 -10

@@ -887,16 +887,14 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		flags |= IOMAP_WRITE;
 	}
 
-	if (mapping->nrpages) {
-		ret = filemap_write_and_wait_range(mapping, start, end);
-		if (ret)
-			goto out_free_dio;
+	ret = filemap_write_and_wait_range(mapping, start, end);
+	if (ret)
+		goto out_free_dio;
 
-		ret = invalidate_inode_pages2_range(mapping,
-				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-		ret = 0;
-	}
+	ret = invalidate_inode_pages2_range(mapping,
+			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+	WARN_ON_ONCE(ret);
+	ret = 0;
 
 	inode_dio_begin(inode);

@@ -951,7 +949,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	 * one is a pretty crazy thing to do, so we don't support it 100%. If
 	 * this invalidation fails, tough, the write still worked...
 	 */
-	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
+	if (iov_iter_rw(iter) == WRITE) {
 		int err = invalidate_inode_pages2_range(mapping,
 				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 		WARN_ON_ONCE(err);
@@ -43,6 +43,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bitops.h>
 #include <linux/ratelimit.h>
+#include <linux/sched/mm.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>

@@ -205,6 +206,14 @@ static int kjournald2(void *arg)
 	journal->j_task = current;
 	wake_up(&journal->j_wait_done_commit);
 
+	/*
+	 * Make sure that no allocations from this kernel thread will ever
+	 * recurse to the fs layer because we are responsible for the
+	 * transaction commit and any fs involvement might get stuck waiting
+	 * for the transaction commit.
+	 */
+	memalloc_nofs_save();
+
 	/*
 	 * And now, wait forever for commit wakeup events.
 	 */
@@ -29,6 +29,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bug.h>
 #include <linux/module.h>
+#include <linux/sched/mm.h>
 
 #include <trace/events/jbd2.h>
 

@@ -388,6 +389,11 @@ repeat:
 
 	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
 	jbd2_journal_free_transaction(new_transaction);
+	/*
+	 * Ensure that no allocations done while the transaction is open are
+	 * going to recurse back to the fs layer.
+	 */
+	handle->saved_alloc_context = memalloc_nofs_save();
 	return 0;
 }
 

@@ -466,6 +472,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
 	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
 				handle->h_transaction->t_tid, type,
 				line_no, nblocks);
+
 	return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);

@@ -1760,6 +1767,11 @@ int jbd2_journal_stop(handle_t *handle)
 	if (handle->h_rsv_handle)
 		jbd2_journal_free_reserved(handle->h_rsv_handle);
 free_and_exit:
+	/*
+	 * Scope of the GFP_NOFS context is over here and so we can restore the
+	 * original alloc context.
+	 */
+	memalloc_nofs_restore(handle->saved_alloc_context);
 	jbd2_free_handle(handle);
 	return err;
 }
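Both jbd2 files above use the new scoped-allocation API from <linux/sched/mm.h>: instead of passing GFP_NOFS at every call site, the task marks a region during which every allocation implicitly behaves as if __GFP_FS were cleared, so reclaim cannot recurse into the filesystem even from helpers that allocate with GFP_KERNEL. A minimal sketch of the pattern; the wrapper function is illustrative, not from the patch:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *fs_critical_alloc(size_t size)
{
        unsigned int nofs_flags;
        void *p;

        nofs_flags = memalloc_nofs_save();      /* enter NOFS scope */
        p = kmalloc(size, GFP_KERNEL);          /* behaves as GFP_NOFS here */
        memalloc_nofs_restore(nofs_flags);      /* leave NOFS scope */
        return p;
}

Save/restore nests safely because restore puts back whatever flag state save returned, which is why jbd2 stashes the value in handle->saved_alloc_context across the transaction's lifetime.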
@@ -2242,13 +2242,13 @@ unlock:
 	spin_unlock(&o2hb_live_lock);
 }
 
-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
 		char *page)
 {
 	return sprintf(page, "%u\n", o2hb_dead_threshold);
 }
 
-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
 		const char *page, size_t count)
 {
 	unsigned long tmp;

@@ -2297,11 +2297,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
 
 }
 
-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
+CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
 CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
 
 static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
-	&o2hb_heartbeat_group_attr_threshold,
+	&o2hb_heartbeat_group_attr_dead_threshold,
 	&o2hb_heartbeat_group_attr_mode,
 	NULL,
 };
@@ -450,9 +450,8 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
 	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
 	INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
-	init_timer(&sc->sc_idle_timeout);
-	sc->sc_idle_timeout.function = o2net_idle_timer;
-	sc->sc_idle_timeout.data = (unsigned long)sc;
+	setup_timer(&sc->sc_idle_timeout, o2net_idle_timer,
+		    (unsigned long)sc);
 
 	sclog(sc, "alloced\n");
 
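The timer change is a mechanical conversion to the setup_timer() helper of that era, which folds the three-step open-coded initialization into one call. A standalone sketch of the pattern (pre-4.15 timer API; the context structure and callback are illustrative):

#include <linux/timer.h>

struct my_ctx {
        struct timer_list t;
        /* ... */
};

static void my_timer_fn(unsigned long data)
{
        struct my_ctx *ctx = (struct my_ctx *)data;

        (void)ctx;      /* ... handle expiry ... */
}

static void my_ctx_init(struct my_ctx *ctx)
{
        /*
         * Open-coded form being removed:
         *	init_timer(&ctx->t);
         *	ctx->t.function = my_timer_fn;
         *	ctx->t.data = (unsigned long)ctx;
         */
        setup_timer(&ctx->t, my_timer_fn, (unsigned long)ctx);
}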
@@ -956,7 +955,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
 	mutex_lock(&sc->sc_send_lock);
 	ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
 					 virt_to_page(kmalloced_virt),
-					 (long)kmalloced_virt & ~PAGE_MASK,
+					 offset_in_page(kmalloced_virt),
 					 size, MSG_DONTWAIT);
 	mutex_unlock(&sc->sc_send_lock);
 	if (ret == size)
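offset_in_page() is simply the named form of the mask arithmetic it replaces, which is why the conversion is behavior-preserving. A sketch of the equivalence (illustrative helper; the macro lives in include/linux/mm.h):

#include <linux/mm.h>

static bool offsets_match(const void *p)
{
        /* both expressions yield the byte offset of p within its page */
        return offset_in_page(p) == ((unsigned long)p & ~PAGE_MASK);
}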
+7 -1

@@ -441,6 +441,7 @@ struct mem_size_stats {
 	unsigned long private_dirty;
 	unsigned long referenced;
 	unsigned long anonymous;
+	unsigned long lazyfree;
 	unsigned long anonymous_thp;
 	unsigned long shmem_thp;
 	unsigned long swap;

@@ -457,8 +458,11 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
 
-	if (PageAnon(page))
+	if (PageAnon(page)) {
 		mss->anonymous += size;
+		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+			mss->lazyfree += size;
+	}
 
 	mss->resident += size;
 	/* Accumulate the size in pages that have been accessed. */

@@ -771,6 +775,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 		   "Private_Dirty:  %8lu kB\n"
 		   "Referenced:     %8lu kB\n"
 		   "Anonymous:      %8lu kB\n"
+		   "LazyFree:       %8lu kB\n"
 		   "AnonHugePages:  %8lu kB\n"
 		   "ShmemPmdMapped: %8lu kB\n"
 		   "Shared_Hugetlb: %8lu kB\n"

@@ -789,6 +794,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 		   mss.private_dirty >> 10,
 		   mss.referenced >> 10,
 		   mss.anonymous >> 10,
+		   mss.lazyfree >> 10,
 		   mss.anonymous_thp >> 10,
 		   mss.shmem_thp >> 10,
 		   mss.shared_hugetlb >> 10,
+6 -6

@@ -48,7 +48,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
 void *
 kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 {
-	unsigned noio_flag = 0;
+	unsigned nofs_flag = 0;
 	void	*ptr;
 	gfp_t	lflags;
 

@@ -60,17 +60,17 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
 	 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 	 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
 	 * here. Hence we need to tell memory reclaim that we are in such a
-	 * context via PF_MEMALLOC_NOIO to prevent memory reclaim re-entering
+	 * context via PF_MEMALLOC_NOFS to prevent memory reclaim re-entering
 	 * the filesystem here and potentially deadlocking.
 	 */
-	if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
-		noio_flag = memalloc_noio_save();
+	if (flags & KM_NOFS)
+		nofs_flag = memalloc_nofs_save();
 
 	lflags = kmem_flags_convert(flags);
 	ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 
-	if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
-		memalloc_noio_restore(noio_flag);
+	if (flags & KM_NOFS)
+		memalloc_nofs_restore(nofs_flag);
 
 	return ptr;
 }
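The XFS hunk above also shows why a task-level flag beats a gfp argument here: __vmalloc() allocates its internal page tables with GFP_KERNEL regardless of the gfp mask passed in, so only the scoped flag reaches those nested allocations. A condensed sketch of the pattern; the wrapper is illustrative and uses the three-argument __vmalloc() signature of this kernel generation:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

static void *nofs_vzalloc(unsigned long size)
{
        unsigned int nofs_flag;
        void *ptr;

        nofs_flag = memalloc_nofs_save();
        /*
         * Even the page-table allocations done internally with GFP_KERNEL
         * now behave as GFP_NOFS, because the task flag is set.
         */
        ptr = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
        memalloc_nofs_restore(nofs_flag);
        return ptr;
}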
+1 -1

@@ -50,7 +50,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
 	} else {
 		lflags = GFP_KERNEL | __GFP_NOWARN;
-		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+		if (flags & KM_NOFS)
 			lflags &= ~__GFP_FS;
 	}
 
@@ -2886,7 +2886,7 @@ xfs_btree_split_worker(
 	struct xfs_btree_split_args	*args = container_of(work,
 						struct xfs_btree_split_args, work);
 	unsigned long		pflags;
-	unsigned long		new_pflags = PF_FSTRANS;
+	unsigned long		new_pflags = PF_MEMALLOC_NOFS;
 
 	/*
 	 * we are in a transaction context here, but may also be doing work
+3 -3

@@ -189,7 +189,7 @@ xfs_setfilesize_trans_alloc(
 	 * We hand off the transaction to the completion thread now, so
 	 * clear the flag here.
 	 */
-	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 	return 0;
 }
 

@@ -252,7 +252,7 @@ xfs_setfilesize_ioend(
 	 * thus we need to mark ourselves as being in a transaction manually.
 	 * Similarly for freeze protection.
 	 */
-	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
 
 	/* we abort the update if there was an IO error */

@@ -1016,7 +1016,7 @@ xfs_do_writepage(
 	 * Given that we do not allow direct reclaim to call us, we should
 	 * never be called while in a filesystem transaction.
 	 */
-	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
+	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
 		goto redirty;
 
 	/*
+4 -4

@@ -443,17 +443,17 @@ _xfs_buf_map_pages(
 		bp->b_addr = NULL;
 	} else {
 		int retried = 0;
-		unsigned noio_flag;
+		unsigned nofs_flag;
 
 		/*
 		 * vm_map_ram() will allocate auxiliary structures (e.g.
 		 * pagetables) with GFP_KERNEL, yet we are likely to be under
 		 * GFP_NOFS context here. Hence we need to tell memory reclaim
-		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
+		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
 		 * memory reclaim re-entering the filesystem here and
 		 * potentially deadlocking.
 		 */
-		noio_flag = memalloc_noio_save();
+		nofs_flag = memalloc_nofs_save();
 		do {
 			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
 						-1, PAGE_KERNEL);

@@ -461,7 +461,7 @@ _xfs_buf_map_pages(
 			break;
 		vm_unmap_aliases();
 	} while (retried++ <= 1);
-	memalloc_noio_restore(noio_flag);
+	memalloc_nofs_restore(nofs_flag);
 
 	if (!bp->b_addr)
 		return -ENOMEM;
[Some files were not shown because too many files have changed in this diff]