mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-27 15:36:48 +00:00

Since migratetype is no longer overwritten during pageblock isolation, start_isolate_page_range(), has_unmovable_pages(), and set_migratetype_isolate() no longer need to know which migratetype to restore on isolation failure. has_unmovable_pages() still needs to know whether the isolation is for a CMA allocation, so add PB_ISOLATE_MODE_CMA_ALLOC to provide that information. At the same time, change the isolation flags to enum pb_isolate_mode (PB_ISOLATE_MODE_MEM_OFFLINE, PB_ISOLATE_MODE_CMA_ALLOC, PB_ISOLATE_MODE_OTHER). Remove REPORT_FAILURE and check for PB_ISOLATE_MODE_MEM_OFFLINE instead, since only PB_ISOLATE_MODE_MEM_OFFLINE reports isolation failures.

alloc_contig_range() no longer needs a migratetype either. Replace it with a newly defined acr_flags_t that tells whether an allocation is for CMA, and do the same for __alloc_contig_migrate_range(). Add ACR_FLAGS_NONE (set to 0) to indicate ordinary allocations.

Link: https://lkml.kernel.org/r/20250617021115.2331563-7-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Richard Chang <richardycc@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
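The interface change is easiest to see at a call site. The following is a minimal, illustrative sketch, not code from the patch itself: the exact alloc_contig_range() prototype and the ACR_FLAGS_CMA name are assumptions extrapolated from ACR_FLAGS_NONE and the changelog above, while PB_ISOLATE_MODE_MEM_OFFLINE comes from the header below.

/*
 * Illustrative only: ACR_FLAGS_CMA and the alloc_contig_range()
 * prototype are assumed from the changelog, not copied from the patch.
 */
static int example_cma_claim(unsigned long start_pfn, unsigned long nr_pages)
{
        /* The CMA intent now travels via acr_flags_t instead of MIGRATE_CMA. */
        return alloc_contig_range(start_pfn, start_pfn + nr_pages,
                                  ACR_FLAGS_CMA, GFP_KERNEL);
}

static int example_offline_isolate(unsigned long start_pfn, unsigned long end_pfn)
{
        /* Only this mode reports unmovable pages on isolation failure. */
        return start_isolate_page_range(start_pfn, end_pfn,
                                        PB_ISOLATE_MODE_MEM_OFFLINE);
}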
71 lines
2.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
static inline bool is_migrate_isolate_page(struct page *page)
{
        return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
        return migratetype == MIGRATE_ISOLATE;
}
#define get_pageblock_isolate(page) \
        get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#define clear_pageblock_isolate(page) \
        clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#define set_pageblock_isolate(page) \
        set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#else
static inline bool is_migrate_isolate_page(struct page *page)
{
        return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
        return false;
}
static inline bool get_pageblock_isolate(struct page *page)
{
        return false;
}
static inline void clear_pageblock_isolate(struct page *page)
{
}
static inline void set_pageblock_isolate(struct page *page)
{
}
#endif

/*
 * Pageblock isolation modes:
 * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
 *                               e.g., skip over PageHWPoison() pages and
 *                               PageOffline() pages. Unmovable pages will be
 *                               reported in this mode.
 * PB_ISOLATE_MODE_CMA_ALLOC   - isolate for CMA allocations
 * PB_ISOLATE_MODE_OTHER       - isolate for other purposes
 */
enum pb_isolate_mode {
        PB_ISOLATE_MODE_MEM_OFFLINE,
        PB_ISOLATE_MODE_CMA_ALLOC,
        PB_ISOLATE_MODE_OTHER,
};

void __meminit init_pageblock_migratetype(struct page *page,
                                          enum migratetype migratetype,
                                          bool isolate);

bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);

int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             enum pb_isolate_mode mode);

void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        enum pb_isolate_mode mode);
#endif
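For context, here is a hedged sketch of the typical isolate/check/undo sequence a contiguous-range allocator follows with the API declared above. example_claim_range() is a made-up helper, the migration step is elided, and error handling is simplified for illustration; only the three declared functions and the pb_isolate_mode values come from the header itself.

/*
 * Sketch of the usual calling sequence; not taken from the patch.
 */
static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
{
        int ret;

        /* Mark the pageblocks MIGRATE_ISOLATE so nothing new is allocated there. */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       PB_ISOLATE_MODE_OTHER);
        if (ret)
                return ret;

        /* ... migrate movable pages out of [start_pfn, end_pfn) ... */

        /* Confirm every page in the range is free or already isolated. */
        ret = test_pages_isolated(start_pfn, end_pfn, PB_ISOLATE_MODE_OTHER);

        /* ... on success, claim the now-free pages for the caller ... */

        /* Restore the original migratetype whether or not the claim succeeded. */
        undo_isolate_page_range(start_pfn, end_pfn);
        return ret;
}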