f2fs: return bool from __write_node_folio

__write_node_folio can only return 0 or AOP_WRITEPAGE_ACTIVATE.
As part of phasing out AOP_WRITEPAGE_ACTIVATE, switch to a bool return
instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit 80f31d2a7e
parent 0638f28b30
Christoph Hellwig <hch@lst.de>, 2025-05-08 07:14:32 +02:00, committed by Jaegeuk Kim

--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1651,7 +1651,7 @@ static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 	return last_folio;
 }
 
-static int __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
+static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
 		struct writeback_control *wbc, bool do_balance,
 		enum iostat_type io_type, unsigned int *seq_id)
 {
@@ -1681,7 +1681,7 @@ static int __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
 		folio_clear_uptodate(folio);
 		dec_page_count(sbi, F2FS_DIRTY_NODES);
 		folio_unlock(folio);
-		return 0;
+		return true;
 	}
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -1707,7 +1707,7 @@ static int __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
 		dec_page_count(sbi, F2FS_DIRTY_NODES);
 		f2fs_up_read(&sbi->node_write);
 		folio_unlock(folio);
-		return 0;
+		return true;
 	}
 
 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
@@ -1746,11 +1746,12 @@ static int __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
 
 	if (do_balance)
 		f2fs_balance_fs(sbi, false);
-	return 0;
+	return true;
 
 redirty_out:
 	folio_redirty_for_writepage(wbc, folio);
-	return AOP_WRITEPAGE_ACTIVATE;
+	folio_unlock(folio);
+	return false;
 }
 
 int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
@@ -1772,11 +1773,9 @@ int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
 			goto out_page;
 		}
 
-		if (__write_node_folio(node_folio, false, NULL,
-					&wbc, false, FS_GC_NODE_IO, NULL)) {
+		if (!__write_node_folio(node_folio, false, NULL,
+					&wbc, false, FS_GC_NODE_IO, NULL))
 			err = -EAGAIN;
-			folio_unlock(node_folio);
-		}
 		goto release_page;
 	} else {
 		/* set page dirty and write it */
@@ -1871,11 +1870,10 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
-			if (__write_node_folio(folio, atomic &&
+			if (!__write_node_folio(folio, atomic &&
 						folio == last_folio,
 						&submitted, wbc, true,
 						FS_NODE_IO, seq_id)) {
-				folio_unlock(folio);
 				f2fs_folio_put(last_folio, false);
 				folio_batch_release(&fbatch);
 				ret = -EIO;
@@ -2078,16 +2076,15 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 			set_fsync_mark(&folio->page, 0);
 			set_dentry_mark(&folio->page, 0);
 
-			ret = __write_node_folio(folio, false, &submitted,
-						wbc, do_balance, io_type, NULL);
-			if (ret) {
+			if (!__write_node_folio(folio, false, &submitted,
+						wbc, do_balance, io_type, NULL)) {
 				folio_unlock(folio);
 				folio_batch_release(&fbatch);
 				ret = -EIO;
 				goto out;
-			} else if (submitted) {
-				nwritten++;
 			}
+			if (submitted)
+				nwritten++;
 
 			if (--wbc->nr_to_write == 0)
 				break;
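
For readers outside the f2fs writeback path, the contract change can be summarised with the standalone userspace sketch below. It is not f2fs code: write_node_folio(), the checkpoint_running flag and the toy struct folio are invented for illustration, and -EAGAIN is written as a literal. Before the patch, a nonzero return (AOP_WRITEPAGE_ACTIVATE) meant "the folio was redirtied and is still locked", so each caller had to unlock it; after the patch the helper returns a plain bool and drops the folio lock on the redirty path itself, which is why the callers' folio_unlock() calls disappear in the hunks above.

#include <stdbool.h>
#include <stdio.h>

/* toy stand-in for struct folio; only the fields the example needs */
struct folio {
	bool locked;
	bool dirty;
};

static void folio_unlock(struct folio *f)  { f->locked = false; }
static void folio_redirty(struct folio *f) { f->dirty  = true;  }

/*
 * Modelled on the patched __write_node_folio(): returns true when the
 * write was issued and false when the folio had to be redirtied (for
 * example because a checkpoint is running).  Either way the folio lock
 * is dropped before returning, so callers never unlock it themselves.
 */
static bool write_node_folio(struct folio *f, bool checkpoint_running)
{
	if (checkpoint_running) {
		folio_redirty(f);	/* folio_redirty_for_writepage() */
		folio_unlock(f);	/* now done here, not by the caller */
		return false;
	}
	f->dirty = false;		/* pretend the write was submitted */
	folio_unlock(f);
	return true;
}

int main(void)
{
	struct folio node = { .locked = true, .dirty = true };
	int err = 0;

	/* caller pattern after the patch: no folio_unlock() in the error arm */
	if (!write_node_folio(&node, /* checkpoint_running */ true))
		err = -11;		/* stands in for -EAGAIN */

	printf("err=%d locked=%d dirty=%d\n", err, node.locked, node.dirty);
	return 0;
}

Compiled and run, this prints err=-11 locked=0 dirty=1: the caller only records the error, while the helper has already redirtied and unlocked the folio.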