fs: Convert __set_page_dirty_buffers to block_dirty_folio
Convert all callers; mostly this is just changing the aops to point at it, but a few implementations need a little more work.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
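For orientation, this is the shape of the aop change (signatures as introduced by this patch series; a sketch for reference, not the full header):

    /* Old, page-based operation: the callee re-derived the mapping
     * from the page, and could even see a NULL mapping. */
    int (*set_page_dirty)(struct page *page);

    /* New, folio-based operation: the caller passes the mapping
     * explicitly, and the result becomes a plain bool. */
    bool (*dirty_folio)(struct address_space *mapping, struct folio *folio);

For most block-based filesystems the whole conversion is therefore the single aops-table line change repeated in the hunks below; ext4, gfs2 and ntfs need a little more work, shown in their hunks.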
@@ -429,7 +429,7 @@ static int blkdev_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations def_blk_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = blkdev_readpage,
     .readahead = blkdev_readahead,
@@ -73,7 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations adfs_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = adfs_readpage,
     .writepage = adfs_writepage,
@@ -453,7 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations affs_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = affs_readpage,
     .writepage = affs_writepage,
@@ -835,7 +835,7 @@ err_bh:
 }
 
 const struct address_space_operations affs_aops_ofs = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = affs_readpage_ofs,
     //.writepage = affs_writepage_ofs,
@@ -188,7 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations bfs_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = bfs_readpage,
     .writepage = bfs_writepage,
fs/buffer.c (33 changed lines)
@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well. That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-    int newly_dirty;
-    struct address_space *mapping = page_mapping(page);
-
-    if (unlikely(!mapping))
-        return !TestSetPageDirty(page);
+    struct buffer_head *head;
+    bool newly_dirty;
 
     spin_lock(&mapping->private_lock);
-    if (page_has_buffers(page)) {
-        struct buffer_head *head = page_buffers(page);
+    head = folio_buffers(folio);
+    if (head) {
         struct buffer_head *bh = head;
 
         do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
     * Lock out page's memcg migration to keep PageDirty
     * synchronized with per-memcg dirty page counters.
     */
-    lock_page_memcg(page);
-    newly_dirty = !TestSetPageDirty(page);
+    folio_memcg_lock(folio);
+    newly_dirty = !folio_test_set_dirty(folio);
     spin_unlock(&mapping->private_lock);
 
     if (newly_dirty)
-        __set_page_dirty(page, mapping, 1);
+        __folio_mark_dirty(folio, mapping, 1);
 
-    unlock_page_memcg(page);
+    folio_memcg_unlock(folio);
 
     if (newly_dirty)
         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
     return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
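Assembled from the two fs/buffer.c hunks above, the converted function reads roughly as below. The per-buffer loop body between the hunks is not shown in this diff, so it is marked as elided rather than guessed at; everything else comes from the + lines. Two consequences of the new signature are visible: the mapping arrives from the caller instead of being looked up via page_mapping(), and the old NULL-mapping fast path is gone because ->dirty_folio() is only invoked for folios that have a mapping.

    bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
    {
        struct buffer_head *head;
        bool newly_dirty;

        spin_lock(&mapping->private_lock);
        head = folio_buffers(folio);
        if (head) {
            struct buffer_head *bh = head;

            do {
                /* elided in this diff: each buffer in the ring
                 * is marked dirty here */
                bh = bh->b_this_page;
            } while (bh != head);
        }
        /*
         * Lock out memcg migration to keep the dirty flag
         * synchronized with per-memcg dirty page counters.
         */
        folio_memcg_lock(folio);
        newly_dirty = !folio_test_set_dirty(folio);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
            __folio_mark_dirty(folio, mapping, 1);

        folio_memcg_unlock(folio);

        if (newly_dirty)
            __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return newly_dirty;
    }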
@@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
+ * block_dirty_folio() via private_lock. try_to_free_buffers
  * is already excluded via the page lock.
  */
 void create_empty_buffers(struct page *page,
@@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
             (1 << BH_Dirty)|(1 << BH_Uptodate));
 
     /*
-     * Be very careful. We have no exclusion from __set_page_dirty_buffers
+     * Be very careful. We have no exclusion from block_dirty_folio
      * here, and the (potentially unmapped) buffers may become dirty at
      * any time. If a buffer becomes dirty here after we've inspected it
      * then we just miss that fact, and the page stays dirty.
      *
-     * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+     * Buffers outside i_size may be dirtied by block_dirty_folio;
      * handle that here by just cleaning them.
      */
 
@@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
  *
  * The same applies to regular filesystem pages: if all the buffers are
  * clean then we set the page clean and proceed. To do that, we require
- * total exclusion from __set_page_dirty_buffers(). That is obtained with
+ * total exclusion from block_dirty_folio(). That is obtained with
  * private_lock.
  *
  * try_to_free_buffers() is non-blocking.
@@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
     * the page also.
     *
     * private_lock must be held over this entire operation in order
-    * to synchronise against __set_page_dirty_buffers and prevent the
+    * to synchronise against block_dirty_folio and prevent the
     * dirty bit from being lost.
     */
    if (ret)
@@ -545,7 +545,7 @@ const struct address_space_operations ecryptfs_aops = {
     * feedback.
     */
 #ifdef CONFIG_BLOCK
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
 #endif
     .writepage = ecryptfs_writepage,
@@ -490,7 +490,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
 }
 
 static const struct address_space_operations exfat_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = exfat_readpage,
     .readahead = exfat_readahead,
@@ -967,8 +967,8 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
 }
 
 const struct address_space_operations ext2_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
-    .invalidate_folio = block_invalidate_folio,
+    .dirty_folio = block_dirty_folio,
+    .invalidate_folio = block_invalidate_folio,
     .readpage = ext2_readpage,
     .readahead = ext2_readahead,
     .writepage = ext2_writepage,
@@ -983,8 +983,8 @@ const struct address_space_operations ext2_aops = {
 };
 
 const struct address_space_operations ext2_nobh_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
-    .invalidate_folio = block_invalidate_folio,
+    .dirty_folio = block_dirty_folio,
+    .invalidate_folio = block_invalidate_folio,
     .readpage = ext2_readpage,
     .readahead = ext2_readahead,
     .writepage = ext2_nobh_writepage,
@@ -3560,11 +3560,11 @@ static bool ext4_journalled_dirty_folio(struct address_space *mapping,
     return filemap_dirty_folio(mapping, folio);
 }
 
-static int ext4_set_page_dirty(struct page *page)
+static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-    WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
-    WARN_ON_ONCE(!page_has_buffers(page));
-    return __set_page_dirty_buffers(page);
+    WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
+    WARN_ON_ONCE(!folio_buffers(folio));
+    return block_dirty_folio(mapping, folio);
 }
 
 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
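ext4 keeps its sanity checks, but the signature change shows here too: the mapping is handed in rather than looked up. It also explains the NULL-mapping fast path removed from block_dirty_folio() earlier: the generic entry point resolves the mapping once and only calls the aop when one exists. Roughly (a simplified, from-memory sketch of the generic folio_mark_dirty() path in this kernel series, not part of this diff):

    bool folio_mark_dirty(struct folio *folio)
    {
        struct address_space *mapping = folio_mapping(folio);

        if (likely(mapping))
            /* The aop never sees a NULL mapping. */
            return mapping->a_ops->dirty_folio(mapping, folio);

        /* No mapping: dirty the folio directly, the folio
         * equivalent of the old TestSetPageDirty() fallback. */
        return !folio_test_set_dirty(folio);
    }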
@@ -3581,7 +3581,7 @@ static const struct address_space_operations ext4_aops = {
     .writepages = ext4_writepages,
     .write_begin = ext4_write_begin,
     .write_end = ext4_write_end,
-    .set_page_dirty = ext4_set_page_dirty,
+    .dirty_folio = ext4_dirty_folio,
     .bmap = ext4_bmap,
     .invalidate_folio = ext4_invalidate_folio,
     .releasepage = ext4_releasepage,
@@ -3616,7 +3616,7 @@ static const struct address_space_operations ext4_da_aops = {
     .writepages = ext4_writepages,
     .write_begin = ext4_da_write_begin,
     .write_end = ext4_da_write_end,
-    .set_page_dirty = ext4_set_page_dirty,
+    .dirty_folio = ext4_dirty_folio,
     .bmap = ext4_bmap,
     .invalidate_folio = ext4_invalidate_folio,
     .releasepage = ext4_releasepage,
@@ -342,7 +342,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
 }
 
 static const struct address_space_operations fat_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = fat_readpage,
     .readahead = fat_readahead,
@@ -606,18 +606,12 @@ out:
     gfs2_trans_end(sdp);
 }
 
-/**
- * jdata_set_page_dirty - Page dirtying function
- * @page: The page to dirty
- *
- * Returns: 1 if it dirtyed the page, or 0 otherwise
- */
-
-static int jdata_set_page_dirty(struct page *page)
+static bool jdata_dirty_folio(struct address_space *mapping,
+        struct folio *folio)
 {
     if (current->journal_info)
-        SetPageChecked(page);
-    return __set_page_dirty_buffers(page);
+        folio_set_checked(folio);
+    return block_dirty_folio(mapping, folio);
 }
 
 /**
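ext4 (above) and gfs2 (here) illustrate the wrapper pattern for filesystems that need more than the default: do the filesystem-specific work on the folio, then delegate the buffer walk and dirty accounting to block_dirty_folio(). As a generalized sketch (myfs_dirty_folio and myfs_prepare are hypothetical names, not part of this commit):

    /* Hypothetical wrapper following the ext4/gfs2 shape. */
    static bool myfs_dirty_folio(struct address_space *mapping,
            struct folio *folio)
    {
        myfs_prepare(folio);    /* e.g. folio_set_checked() in gfs2 */
        return block_dirty_folio(mapping, folio);
    }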
@@ -795,7 +789,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
     .writepages = gfs2_jdata_writepages,
     .readpage = gfs2_readpage,
     .readahead = gfs2_readahead,
-    .set_page_dirty = jdata_set_page_dirty,
+    .dirty_folio = jdata_dirty_folio,
     .bmap = gfs2_bmap,
     .invalidate_folio = gfs2_invalidate_folio,
     .releasepage = gfs2_releasepage,
@@ -89,14 +89,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 }
 
 const struct address_space_operations gfs2_meta_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .writepage = gfs2_aspace_writepage,
     .releasepage = gfs2_releasepage,
 };
 
 const struct address_space_operations gfs2_rgrp_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .writepage = gfs2_aspace_writepage,
     .releasepage = gfs2_releasepage,
@@ -159,7 +159,7 @@ static int hfs_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations hfs_btree_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = hfs_readpage,
     .writepage = hfs_writepage,
@@ -170,7 +170,7 @@ const struct address_space_operations hfs_btree_aops = {
 };
 
 const struct address_space_operations hfs_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = hfs_readpage,
     .writepage = hfs_writepage,
@@ -156,7 +156,7 @@ static int hfsplus_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations hfsplus_btree_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = hfsplus_readpage,
     .writepage = hfsplus_writepage,
@@ -167,7 +167,7 @@ const struct address_space_operations hfsplus_btree_aops = {
 };
 
 const struct address_space_operations hfsplus_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = hfsplus_readpage,
     .writepage = hfsplus_writepage,
@@ -245,7 +245,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 }
 
 const struct address_space_operations hpfs_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = hpfs_readpage,
     .writepage = hpfs_writepage,
@@ -357,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 const struct address_space_operations jfs_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = jfs_readpage,
     .readahead = jfs_readahead,
@@ -442,7 +442,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations minix_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
     .invalidate_folio = block_invalidate_folio,
     .readpage = minix_readpage,
     .writepage = minix_writepage,
@@ -504,7 +504,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
         if (!buffer_mapped(bh)) {
             /*
              * unmapped dirty buffers are created by
-             * __set_page_dirty_buffers -> mmapped data
+             * block_dirty_folio -> mmapped data
              */
             if (buffer_dirty(bh))
                 goto confused;
@@ -434,8 +434,8 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 
 
 static const struct address_space_operations def_mdt_aops = {
-    .set_page_dirty = __set_page_dirty_buffers,
-    .invalidate_folio = block_invalidate_folio,
+    .dirty_folio = block_dirty_folio,
+    .invalidate_folio = block_invalidate_folio,
     .writepage = nilfs_mdt_write_page,
 };
 
@@ -593,12 +593,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
     iblock = initialized_size >> blocksize_bits;
 
     /*
-     * Be very careful. We have no exclusion from __set_page_dirty_buffers
+     * Be very careful. We have no exclusion from block_dirty_folio
      * here, and the (potentially unmapped) buffers may become dirty at
      * any time. If a buffer becomes dirty here after we've inspected it
      * then we just miss that fact, and the page stays dirty.
     *
-     * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+     * Buffers outside i_size may be dirtied by block_dirty_folio;
      * handle that here by just cleaning them.
      */
 
@@ -653,7 +653,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
         // Update initialized size in the attribute and
         // in the inode.
         // Again, for each page do:
-        // __set_page_dirty_buffers();
+        // block_dirty_folio();
         // put_page()
         // We don't need to wait on the writes.
         // Update iblock.
@@ -1654,7 +1654,7 @@ const struct address_space_operations ntfs_normal_aops = {
     .readpage = ntfs_readpage,
 #ifdef NTFS_RW
     .writepage = ntfs_writepage,
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
 #endif /* NTFS_RW */
     .bmap = ntfs_bmap,
     .migratepage = buffer_migrate_page,
@@ -1669,7 +1669,7 @@ const struct address_space_operations ntfs_compressed_aops = {
     .readpage = ntfs_readpage,
 #ifdef NTFS_RW
     .writepage = ntfs_writepage,
-    .set_page_dirty = __set_page_dirty_buffers,
+    .dirty_folio = block_dirty_folio,
 #endif /* NTFS_RW */
     .migratepage = buffer_migrate_page,
     .is_partially_uptodate = block_is_partially_uptodate,
@@ -1746,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
         set_buffer_dirty(bh);
     } while ((bh = bh->b_this_page) != head);
     spin_unlock(&mapping->private_lock);
-    __set_page_dirty_nobuffers(page);
+    block_dirty_folio(mapping, page_folio(page));
     if (unlikely(buffers_to_free)) {
         do {
             bh = buffers_to_free->b_this_page;
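In this last hunk ntfs was calling __set_page_dirty_nobuffers(); the code still works in terms of struct page, so it bridges into the folio API with page_folio(), which resolves a page to its containing folio without taking an extra reference. As a sketch of that bridging pattern (the helper name is hypothetical, not part of this commit):

    /* Hypothetical helper showing the page-to-folio bridge used above. */
    static inline bool dirty_record_page(struct address_space *mapping,
            struct page *page)
    {
        return block_dirty_folio(mapping, page_folio(page));
    }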
Some files were not shown because too many files have changed in this diff.