mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-08-27 05:36:11 +00:00
drm/ttm: Increase pool shrinker batch target
The default core shrink target of 128 pages (SHRINK_BATCH) is quite low relative to how cheap TTM pool shrinking is, and how the free pages are distributed in page order pools. We can make the target a bit more aggressive by making it roughly the average number of pages across all pools, freeing more of the cached pages every time shrinker core invokes our callback. Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com> Cc: Christian König <christian.koenig@amd.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net> Link: https://lore.kernel.org/r/20250603112750.34997-3-tvrtko.ursulin@igalia.com
This commit is contained in:
parent
eac21f8ebe
commit
22b929b252
@@ -1265,10 +1265,16 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 }
 EXPORT_SYMBOL(ttm_pool_debugfs);
 
+/* Free average pool number of pages. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
+
 /* Test the shrinker functions and dump the result */
 static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
 {
-	struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+	struct shrink_control sc = {
+		.gfp_mask = GFP_NOFS,
+		.nr_to_scan = TTM_SHRINKER_BATCH,
+	};
 
 	fs_reclaim_acquire(GFP_KERNEL);
 	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
@@ -1326,6 +1332,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 
 	mm_shrinker->count_objects = ttm_pool_shrinker_count;
 	mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+	mm_shrinker->batch = TTM_SHRINKER_BATCH;
 	mm_shrinker->seeks = 1;
 
 	shrinker_register(mm_shrinker);
Loading…
Reference in New Issue
Block a user