mm: Remove swap_writepage() and shmem_writepage()

Call swap_writeout() and shmem_writeout() from pageout() instead.

Signed-off-by: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Link: https://lore.kernel.org/r/20250402150005.2309458-9-willy@infradead.org
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 84798514db
parent fe75adffac
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   2025-04-02 16:00:02 +01:00
Committer: Christian Brauner <brauner@kernel.org>
7 files changed, 26 insertions(+), 37 deletions(-)
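In brief, pageout() now chooses the writeout function itself instead of going through mapping->a_ops->writepage(). A condensed sketch of the new dispatch, pieced together from the mm/vmscan.c hunks below (wbc setup, locking and error handling elided; not the verbatim kernel code):

	int (*writeout)(struct folio *, struct writeback_control *);

	if (shmem_mapping(mapping))		/* tmpfs/shmem can still swap out from reclaim */
		writeout = shmem_writeout;
	else if (folio_test_anon(folio))	/* anonymous folios go through swap_writeout() */
		writeout = swap_writeout;
	else
		return PAGE_ACTIVATE;		/* nothing else is written back from reclaim */

	/* ... after folio_clear_dirty_for_io() succeeds ... */
	res = writeout(folio, &wbc);		/* was: mapping->a_ops->writepage(&folio->page, &wbc) */

Dispatching on shmem_mapping()/folio_test_anon() is what lets both shmem_aops and swap_aops drop their .writepage entries in the hunks below.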

block/blk-wbt.h

@@ -37,7 +37,7 @@
 enum wbt_flags {
	WBT_TRACKED = 1,	/* write, tracked for throttling */
	WBT_READ = 2,		/* read */
-	WBT_SWAP = 4,		/* write, from swap_writepage() */
+	WBT_SWAP = 4,		/* write, from swap_writeout() */
	WBT_DISCARD = 8,	/* discard */

	WBT_NR_BITS = 4,	/* number of bits */

mm/page_io.c

@@ -237,9 +237,8 @@ static void swap_zeromap_folio_clear(struct folio *folio)
  * We may have stale swap cache pages in memory: notice
  * them here and get rid of the unnecessary final write.
  */
-int swap_writepage(struct page *page, struct writeback_control *wbc)
+int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-	struct folio *folio = page_folio(page);
 	int ret;

 	if (folio_free_swap(folio)) {

mm/shmem.c

@@ -98,7 +98,7 @@ static struct vfsmount *shm_mnt __ro_after_init;
 #define SHORT_SYMLINK_LEN 128

 /*
- * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * shmem_fallocate communicates with shmem_fault or shmem_writeout via
  * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
@@ -107,7 +107,7 @@ struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
-	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
+	pgoff_t nr_unswapped;	/* how often writeout refused to swap out */
 };

 struct shmem_options {
@@ -446,7 +446,7 @@ static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
	/*
	 * Special case: whereas normally shmem_recalc_inode() is called
	 * after i_mapping->nrpages has already been adjusted (up or down),
-	 * shmem_writepage() has to raise swapped before nrpages is lowered -
+	 * shmem_writeout() has to raise swapped before nrpages is lowered -
	 * to stop a racing shmem_recalc_inode() from thinking that a page has
	 * been freed. Compensate here, to avoid the need for a followup call.
	 */
@@ -1536,11 +1536,6 @@ int shmem_unuse(unsigned int type)
	return error;
 }

-static int shmem_writepage(struct page *page, struct writeback_control *wbc)
-{
-	return shmem_writeout(page_folio(page), wbc);
-}
-
 /**
  * shmem_writeout - Write the folio to swap
  * @folio: The folio to write
@@ -1558,13 +1553,6 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
	int nr_pages;
	bool split = false;

-	/*
-	 * Our capabilities prevent regular writeback or sync from ever calling
-	 * shmem_writepage; but a stacking filesystem might use ->writepage of
-	 * its underlying filesystem, in which case tmpfs should write out to
-	 * swap only in response to memory pressure, and not for the writeback
-	 * threads or sync.
-	 */
	if (WARN_ON_ONCE(!wbc->for_reclaim))
		goto redirty;
@@ -1653,7 +1641,7 @@ int shmem_writeout(struct folio *folio, struct writeback_control *wbc)
			mutex_unlock(&shmem_swaplist_mutex);
			BUG_ON(folio_mapped(folio));
-			return swap_writepage(&folio->page, wbc);
+			return swap_writeout(folio, wbc);
		}

		list_del_init(&info->swaplist);
@@ -3776,7 +3764,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
			index--;

			/*
-			 * Inform shmem_writepage() how far we have reached.
+			 * Inform shmem_writeout() how far we have reached.
			 * No need for lock or barrier: we have the page lock.
			 */
			if (!folio_test_uptodate(folio))
@@ -5199,7 +5187,6 @@ static int shmem_error_remove_folio(struct address_space *mapping,
 }

 static const struct address_space_operations shmem_aops = {
-	.writepage	= shmem_writepage,
	.dirty_folio	= noop_dirty_folio,
 #ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,

mm/swap.h

@@ -20,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
		__swap_read_unplug(plug);
 }
 void swap_write_unplug(struct swap_iocb *sio);
-int swap_writepage(struct page *page, struct writeback_control *wbc);
+int swap_writeout(struct folio *folio, struct writeback_control *wbc);
 void __swap_writepage(struct folio *folio, struct writeback_control *wbc);

 /* linux/mm/swap_state.c */
@@ -141,7 +141,7 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
	return NULL;
 }

-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+static inline int swap_writeout(struct folio *f, struct writeback_control *wbc)
 {
	return 0;
 }

mm/swap_state.c

@@ -30,7 +30,6 @@
  * vmscan's shrink_folio_list.
  */
 static const struct address_space_operations swap_aops = {
-	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
 #ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,

mm/swapfile.c

@@ -2359,7 +2359,7 @@ static int try_to_unuse(unsigned int type)
	 * Limit the number of retries? No: when mmget_not_zero()
	 * above fails, that mm is likely to be freeing swap from
	 * exit_mmap(), which proceeds at its own independent pace;
-	 * and even shmem_writepage() could have been preempted after
+	 * and even shmem_writeout() could have been preempted after
	 * folio_alloc_swap(), temporarily hiding that swap. It's easy
	 * and robust (though cpu-intensive) just to keep retrying.
	 */

mm/vmscan.c

@@ -653,16 +653,16 @@ typedef enum {
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
			 struct swap_iocb **plug, struct list_head *folio_list)
 {
+	int (*writeout)(struct folio *, struct writeback_control *);
+
	/*
-	 * If the folio is dirty, only perform writeback if that write
-	 * will be non-blocking. To prevent this allocation from being
-	 * stalled by pagecache activity. But note that there may be
-	 * stalls if we need to run get_block(). We could test
-	 * PagePrivate for that.
-	 *
-	 * If this process is currently in __generic_file_write_iter() against
-	 * this folio's queue, we can perform writeback even if that
-	 * will block.
+	 * We no longer attempt to writeback filesystem folios here, other
+	 * than tmpfs/shmem. That's taken care of in page-writeback.
+	 * If we find a dirty filesystem folio at the end of the LRU list,
+	 * typically that means the filesystem is saturating the storage
+	 * with contiguous writes and telling it to write a folio here
+	 * would only make the situation worse by injecting an element
+	 * of random access.
	 *
	 * If the folio is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
@@ -685,7 +685,11 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
		}
		return PAGE_KEEP;
	}
-	if (mapping->a_ops->writepage == NULL)
+	if (shmem_mapping(mapping))
+		writeout = shmem_writeout;
+	else if (folio_test_anon(folio))
+		writeout = swap_writeout;
+	else
		return PAGE_ACTIVATE;

	if (folio_clear_dirty_for_io(folio)) {
@@ -708,7 +712,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
		wbc.list = folio_list;

		folio_set_reclaim(folio);
-		res = mapping->a_ops->writepage(&folio->page, &wbc);
+		res = writeout(folio, &wbc);
		if (res < 0)
			handle_write_error(mapping, folio, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
@@ -717,7 +721,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
		}

		if (!folio_test_writeback(folio)) {
-			/* synchronous write or broken a_ops? */
+			/* synchronous write? */
			folio_clear_reclaim(folio);
		}
		trace_mm_vmscan_write_folio(folio);