mirror of
https://git.proxmox.com/git/mirror_ubuntu-kernels.git
synced 2026-01-28 05:53:14 +00:00
To reduce the runtime overhead even further when online fsck isn't running, use a static branch key to decide if we call wake_up on the drain. For compilers that support jump labels, the call to wake_up is replaced by a nop sled when nobody is waiting for intents to drain.

From my initial microbenchmarking, every transition of the static key between the on and off states takes about 22000ns to complete; this is paid entirely by the xfs_scrub process. When the static key is off (which it should be when fsck isn't running), the nop sled adds an overhead of approximately 0.36ns to runtime code. The post-atomic lockless waiter check adds about 0.03ns, which is basically free.

For the few compilers that don't support jump labels, runtime code pays the cost of calling wake_up on an empty waitqueue, which was observed to be about 30ns. However, most architectures that have sufficient memory and CPU capacity to run XFS also support jump labels, so this is not much of a worry.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
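For context, a minimal sketch of the pattern the message describes follows. The identifiers and the stand-in drain structure are illustrative assumptions, not the names used in the XFS tree; only DEFINE_STATIC_KEY_FALSE(), static_branch_inc()/static_branch_dec(), static_branch_unlikely(), wq_has_sleeper(), and wake_up() are real kernel APIs:

        #include <linux/jump_label.h>   /* static keys (jump labels) */
        #include <linux/wait.h>         /* waitqueues, wq_has_sleeper() */

        /* Illustrative stand-in for the drain object; field name is assumed. */
        struct drain {
                wait_queue_head_t       waiters;
        };

        /* Compiled to a nop sled on the hot path while nobody is waiting. */
        static DEFINE_STATIC_KEY_FALSE(drain_waiter_gate);

        /* Scrub pays ~22000ns for each on/off transition of the key. */
        static void drain_waiter_enable(void)
        {
                static_branch_inc(&drain_waiter_gate);
        }

        static void drain_waiter_disable(void)
        {
                static_branch_dec(&drain_waiter_gate);
        }

        /*
         * Hot path: with jump label support the branch costs ~0.36ns when
         * the key is off; wq_has_sleeper() stands in for the post-atomic
         * lockless waiter check (~0.03ns).  Without jump labels this
         * degrades to calling wake_up() on an empty waitqueue, ~30ns.
         */
        static inline void drain_wake(struct drain *dr)
        {
                if (static_branch_unlikely(&drain_waiter_gate) &&
                    wq_has_sleeper(&dr->waiters))
                        wake_up(&dr->waiters);
        }

static_branch_inc() and static_branch_dec() patch the branch sites in kernel text at runtime, which is why each transition is expensive while the steady-state cost is just the nop sled.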
159 lines
3.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub free space btrees.
 */
int
xchk_setup_ag_allocbt(
        struct xfs_scrub        *sc)
{
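        /*
         * If this scrub needs to wait for deferred work items to drain,
         * turn on the drain fsgate; per the commit message above, this
         * flips the static key so intent drains start issuing wakeups.
         */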
        if (xchk_need_intent_drain(sc))
                xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

        return xchk_setup_ag_btree(sc, false);
}

/* Free space btree scrubber. */

/*
 * Ensure there's a corresponding cntbt/bnobt record matching this
 * bnobt/cntbt record, respectively.
 */
STATIC void
xchk_allocbt_xref_other(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
        struct xfs_btree_cur    **pcur;
        xfs_agblock_t           fbno;
        xfs_extlen_t            flen;
        int                     has_otherrec;
        int                     error;

        if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
                pcur = &sc->sa.cnt_cur;
        else
                pcur = &sc->sa.bno_cur;
        if (!*pcur || xchk_skip_xref(sc->sm))
                return;
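
        /*
         * Both free space btrees index the same extents, so this record
         * must have an exact twin in the other btree.  Look up the
         * closest record at or below (agbno, len) and compare.
         */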
        error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
        if (!xchk_should_check_xref(sc, &error, pcur))
                return;
        if (!has_otherrec) {
                xchk_btree_xref_set_corrupt(sc, *pcur, 0);
                return;
        }

        error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
        if (!xchk_should_check_xref(sc, &error, pcur))
                return;
        if (!has_otherrec) {
                xchk_btree_xref_set_corrupt(sc, *pcur, 0);
                return;
        }

        if (fbno != agbno || flen != len)
                xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_allocbt_xref(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
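        /* Skip the cross-checks if this btree is already known to be corrupt. */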
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        xchk_allocbt_xref_other(sc, agbno, len);
        xchk_xref_is_not_inode_chunk(sc, agbno, len);
        xchk_xref_has_no_owner(sc, agbno, len);
        xchk_xref_is_not_shared(sc, agbno, len);
}

/* Scrub a bnobt/cntbt record. */
STATIC int
xchk_allocbt_rec(
        struct xchk_btree       *bs,
        const union xfs_btree_rec *rec)
{
        struct xfs_perag        *pag = bs->cur->bc_ag.pag;
        xfs_agblock_t           bno;
        xfs_extlen_t            len;

        bno = be32_to_cpu(rec->alloc.ar_startblock);
        len = be32_to_cpu(rec->alloc.ar_blockcount);
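
        /* The record must describe a well-formed extent within this AG. */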
        if (!xfs_verify_agbext(pag, bno, len))
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        xchk_allocbt_xref(bs->sc, bno, len);

        return 0;
}

/* Scrub the freespace btrees for some AG. */
STATIC int
xchk_allocbt(
        struct xfs_scrub        *sc,
        xfs_btnum_t             which)
{
        struct xfs_btree_cur    *cur;

        cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
        return xchk_btree(sc, cur, xchk_allocbt_rec, &XFS_RMAP_OINFO_AG, NULL);
}

int
xchk_bnobt(
        struct xfs_scrub        *sc)
{
        return xchk_allocbt(sc, XFS_BTNUM_BNO);
}

int
xchk_cntbt(
        struct xfs_scrub        *sc)
{
        return xchk_allocbt(sc, XFS_BTNUM_CNT);
}

/* xref check that the extent is not free */
void
xchk_xref_is_used_space(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
        bool                    is_freesp;
        int                     error;

        if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
                return;
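
        /*
         * If any bnobt record overlaps this extent, then allegedly-used
         * space is also recorded as free, which is corruption.
         */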
        error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
                return;
        if (is_freesp)
                xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
}