During testing I found that zswap_writeback_entry() sometimes returns
-ENOMEM, which is not what we expect:
bpftrace -e 'kr:zswap_writeback_entry {@[(int32)retval]=count()}'
@[-12]: 1563
@[0]: 277221
The reason is that __read_swap_cache_async() returns NULL because
swapcache_prepare() fails: we don't invalidate the zswap entry when the
swap entry is freed to the per-cpu pool, so these zswap entries are
still on the zswap tree and LRU list.
This patch moves the invalidation ahead to the point when the swap entry
is freed to the per-cpu pool, since there is no benefit to leaving stale
zswap entries on the tree and LRU list.
With this patch:
bpftrace -e 'kr:zswap_writeback_entry {@[(int32)retval]=count()}'
@[0]: 259744
Note: a large folio can't have a zswap entry for now, so we don't bother
to add zswap entry invalidation in the large folio swap free path.
Link: https://lkml.kernel.org/r/20240201-b4-zswap-invalidate-entry-v2-2-99d4084260a0@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/zswap.h (71 lines, 1.7 KiB, C)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ZSWAP_H
#define _LINUX_ZSWAP_H

#include <linux/types.h>
#include <linux/mm_types.h>

struct lruvec;

extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;

#ifdef CONFIG_ZSWAP

struct zswap_lruvec_state {
	/*
	 * Number of pages in zswap that should be protected from the shrinker.
	 * This number is an estimate of the following counts:
	 *
	 * a) Recent page faults.
	 * b) Recent insertion to the zswap LRU. This includes new zswap stores,
	 *    as well as recent zswap LRU rotations.
	 *
	 * These pages are likely to be warm, and might incur IO if they are
	 * written to swap.
	 */
	atomic_long_t nr_zswap_protected;
};

bool zswap_store(struct folio *folio);
bool zswap_load(struct folio *folio);
void zswap_invalidate(swp_entry_t swp);
int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
void zswap_lruvec_state_init(struct lruvec *lruvec);
void zswap_folio_swapin(struct folio *folio);
bool is_zswap_enabled(void);

#else

struct zswap_lruvec_state {};

static inline bool zswap_store(struct folio *folio)
{
	return false;
}

static inline bool zswap_load(struct folio *folio)
{
	return false;
}

static inline void zswap_invalidate(swp_entry_t swp) {}
static inline int zswap_swapon(int type, unsigned long nr_pages)
{
	return 0;
}
static inline void zswap_swapoff(int type) {}
static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
static inline void zswap_folio_swapin(struct folio *folio) {}

static inline bool is_zswap_enabled(void)
{
	return false;
}

#endif

#endif /* _LINUX_ZSWAP_H */
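For context on how this header is consumed, here is a hedged sketch of the
swap-out caller pattern (loosely modeled on mm/page_io.c; the function body
and swap_writepage_fallback() are illustrative assumptions, not the verbatim
kernel code). It also shows why the CONFIG_ZSWAP=n stubs above matter:
callers stay unconditional, and zswap_store() simply compiles down to
"return false".

#include <linux/swap.h>
#include <linux/zswap.h>

int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);

	/*
	 * If zswap accepts the folio, a compressed copy is kept in
	 * memory and no device I/O is needed.
	 */
	if (zswap_store(folio)) {
		folio_unlock(folio);
		return 0;
	}

	/* Hypothetical helper: submit the folio to the backing swap device. */
	return swap_writepage_fallback(folio, wbc);
}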