Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
mm: add __dump_folio()
Turn __dump_page() into a wrapper around __dump_folio().  Snapshot the
page & folio into a stack variable so we don't hit BUG_ON() if an
allocation is freed under us and what was a folio pointer becomes a
pointer to a tail page.

[willy@infradead.org: fix build issue]
  Link: https://lkml.kernel.org/r/ZeAKCyTn_xS3O9cE@casper.infradead.org
[willy@infradead.org: fix __dump_folio]
  Link: https://lkml.kernel.org/r/ZeJJegP8zM7S9GTy@casper.infradead.org
[willy@infradead.org: fix pointer confusion]
  Link: https://lkml.kernel.org/r/ZeYa00ixxC4k1ot-@casper.infradead.org
[akpm@linux-foundation.org: s/printk/pr_warn/]
Link: https://lkml.kernel.org/r/20240227192337.757313-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit fae7d834c4
parent 7da8988c7c
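The core of the fix is a snapshot-and-retry pattern: copy the possibly-racing
page into a stack variable before inspecting it, validate the copy, and retry
a bounded number of times before falling back to treating the page as order-0.
A minimal userspace sketch of that shape, where fake_page, snapshot_ok() and
dump_snapshot() are hypothetical stand-ins rather than kernel API:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for struct page / struct folio. */
	struct fake_page {
		unsigned long compound_head;	/* low bit set => tail page */
		unsigned long nr_pages;		/* folio size recorded in the head */
	};

	/* A snapshot is consistent when the page's index fits inside the folio. */
	static bool snapshot_ok(const struct fake_page *head, unsigned long idx)
	{
		return idx < head->nr_pages;
	}

	static void dump_snapshot(const struct fake_page *page, unsigned long idx)
	{
		int loops = 5;
		struct fake_page precise;

	again:
		/* Copy first; never dereference the live object more than once. */
		memcpy(&precise, page, sizeof(precise));
		if (!snapshot_ok(&precise, idx)) {
			if (loops-- > 0)
				goto again;	/* racing free/realloc: retry */
			printf("page does not match folio\n");
			precise.compound_head &= ~1UL;	/* fall back to order-0 */
			idx = 0;
		}
		printf("idx %lu of %lu-page folio\n", idx, precise.nr_pages);
	}

	int main(void)
	{
		struct fake_page p = { .compound_head = 0, .nr_pages = 4 };

		dump_snapshot(&p, 2);	/* consistent: dumps normally */
		dump_snapshot(&p, 9);	/* inconsistent: retries, then falls back */
		return 0;
	}

Bounding the retries (loops = 5, as in the patch) keeps the dumper from
livelocking against an allocator that keeps reusing the page under it.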
include/linux/mm.h
@@ -2066,6 +2066,13 @@ static inline long folio_nr_pages(struct folio *folio)
 #endif
 }
 
+/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+#define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
+#else
+#define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
+#endif
+
 /*
  * compound_nr() returns the number of pages in this potentially compound
  * page.  compound_nr() can be called on a tail page, and is defined to
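For a sense of scale, assuming x86-64 with 4 KiB pages (PAGE_SHIFT 12,
PUD_SHIFT 30, hence PUD_ORDER 18), the gigantic-page bound comes out to
1 << 18 = 262144 pages, i.e. one 1 GiB PUD-sized hugetlb folio. A standalone
check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed x86-64 values: PUD_SHIFT 30, PAGE_SHIFT 12. */
		const unsigned int pud_order = 30 - 12;		/* PUD_ORDER = 18 */
		const unsigned long max_folio_nr_pages = 1UL << pud_order;

		/* 262144 pages x 4 KiB each = 1 GiB, the PUD-sized hugetlb case. */
		printf("MAX_FOLIO_NR_PAGES = %lu pages = %lu MiB\n",
		       max_folio_nr_pages, max_folio_nr_pages * 4096 >> 20);
		return 0;
	}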
include/linux/mmzone.h
@@ -76,9 +76,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
 # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+# define is_migrate_cma_folio(folio, pfn)	(MIGRATE_CMA ==		\
+	get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
 #else
 # define is_migrate_cma(migratetype) false
 # define is_migrate_cma_page(_page) false
+# define is_migrate_cma_folio(folio, pfn) false
 #endif
 
 static inline bool is_migrate_movable(int mt)
mm/debug.c (128 lines changed)
@@ -51,84 +51,102 @@ const struct trace_print_flags vmaflag_names[] = {
 	{0, NULL}
 };
 
-static void __dump_page(struct page *page)
+static void __dump_folio(struct folio *folio, struct page *page,
+		unsigned long pfn, unsigned long idx)
 {
-	struct folio *folio = page_folio(page);
-	struct page *head = &folio->page;
-	struct address_space *mapping;
-	bool compound = PageCompound(page);
-	/*
-	 * Accessing the pageblock without the zone lock. It could change to
-	 * "isolate" again in the meantime, but since we are just dumping the
-	 * state for debugging, it should be fine to accept a bit of
-	 * inaccuracy here due to racing.
-	 */
-	bool page_cma = is_migrate_cma_page(page);
-	int mapcount;
+	struct address_space *mapping = folio_mapping(folio);
+	int mapcount = 0;
 	char *type = "";
 
-	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
-		/*
-		 * Corrupt page, so we cannot call page_mapping. Instead, do a
-		 * safe subset of the steps that page_mapping() does. Caution:
-		 * this will be misleading for tail pages, PageSwapCache pages,
-		 * and potentially other situations.  (See the page_mapping()
-		 * implementation for what's missing here.)
-		 */
-		unsigned long tmp = (unsigned long)page->mapping;
-
-		if (tmp & PAGE_MAPPING_ANON)
-			mapping = NULL;
-		else
-			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
-		head = page;
-		folio = (struct folio *)page;
-		compound = false;
-	} else {
-		mapping = page_mapping(page);
+	/*
+	 * page->_mapcount space in struct page is used by slab pages to
+	 * encode own info, and we must avoid calling page_folio() again.
+	 */
+	if (!folio_test_slab(folio)) {
+		mapcount = atomic_read(&page->_mapcount) + 1;
+		if (folio_test_large(folio))
+			mapcount += folio_entire_mapcount(folio);
 	}
 
-	/*
-	 * Avoid VM_BUG_ON() in page_mapcount().
-	 * page->_mapcount space in struct page is used by sl[aou]b pages to
-	 * encode own info.
-	 */
-	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
-
-	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
-			page, page_ref_count(head), mapcount, mapping,
-			page_to_pgoff(page), page_to_pfn(page));
-	if (compound) {
-		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
-				head, compound_order(head),
+	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
+			folio_ref_count(folio), mapcount, mapping,
+			folio->index + idx, pfn);
+	if (folio_test_large(folio)) {
+		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+				folio_order(folio),
 				folio_entire_mapcount(folio),
 				folio_nr_pages_mapped(folio),
 				atomic_read(&folio->_pincount));
 	}
 
 #ifdef CONFIG_MEMCG
-	if (head->memcg_data)
-		pr_warn("memcg:%lx\n", head->memcg_data);
+	if (folio->memcg_data)
+		pr_warn("memcg:%lx\n", folio->memcg_data);
 #endif
-	if (PageKsm(page))
+	if (folio_test_ksm(folio))
 		type = "ksm ";
-	else if (PageAnon(page))
+	else if (folio_test_anon(folio))
 		type = "anon ";
 	else if (mapping)
 		dump_mapping(mapping);
 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
-		page_cma ? " CMA" : "");
-	pr_warn("page_type: %pGt\n", &head->page_type);
+	/*
+	 * Accessing the pageblock without the zone lock. It could change to
+	 * "isolate" again in the meantime, but since we are just dumping the
+	 * state for debugging, it should be fine to accept a bit of
+	 * inaccuracy here due to racing.
+	 */
+	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
+		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
+	pr_warn("page_type: %pGt\n", &folio->page.page_type);
 
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
 			sizeof(struct page), false);
-	if (head != page)
+	if (folio_test_large(folio))
 		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
-			sizeof(unsigned long), head,
-			sizeof(struct page), false);
+			sizeof(unsigned long), folio,
+			2 * sizeof(struct page), false);
+}
+
+static void __dump_page(const struct page *page)
+{
+	struct folio *foliop, folio;
+	struct page precise;
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long idx, nr_pages = 1;
+	int loops = 5;
+
+again:
+	memcpy(&precise, page, sizeof(*page));
+	foliop = page_folio(&precise);
+	if (foliop == (struct folio *)&precise) {
+		idx = 0;
+		if (!folio_test_large(foliop))
+			goto dump;
+		foliop = (struct folio *)page;
+	} else {
+		idx = folio_page_idx(foliop, page);
+	}
+
+	if (idx < MAX_FOLIO_NR_PAGES) {
+		memcpy(&folio, foliop, 2 * sizeof(struct page));
+		nr_pages = folio_nr_pages(&folio);
+		foliop = &folio;
+	}
+
+	if (idx > nr_pages) {
+		if (loops-- > 0)
+			goto again;
+		pr_warn("page does not match folio\n");
+		precise.compound_head &= ~1UL;
+		foliop = (struct folio *)&precise;
+		idx = 0;
+	}
+
+dump:
+	__dump_folio(foliop, &precise, pfn, idx);
 }
 
 void dump_page(struct page *page, const char *reason)
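The exported entry point is still dump_page(); __dump_page() and
__dump_folio() remain internal to mm/debug.c. A hypothetical out-of-tree
module snippet (not part of this commit) that exercises the dump path:

	#include <linux/module.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int __init dump_demo_init(void)
	{
		/* Allocate an order-0 page and dump its state via the new path. */
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return -ENOMEM;
		dump_page(page, "dump_page() demo");	/* wraps __dump_page() */
		__free_page(page);
		return 0;
	}

	static void __exit dump_demo_exit(void)
	{
	}

	module_init(dump_demo_init);
	module_exit(dump_demo_exit);
	MODULE_LICENSE("GPL");
	MODULE_DESCRIPTION("dump_page() demo");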