mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-08-29 00:32:25 +00:00

In the case of VRAM we might need to allocate large amounts of GFP_KERNEL memory on suspend, however doing that directly in the driver .suspend()/.prepare() callback is not advisable (no swap for example). To improve on this we can instead hook up to the PM notifier framework which is invoked at an earlier stage. We effectively call the evict routine twice, where the notifier will hopefully have cleared out most if not everything by the time we call it a second time when entering the .suspend() callback. For s4 we also get the added benefit of allocating the system pages before the hibernation image size is calculated, which looks more sensible. Note that the .suspend() hook is still responsible for dealing with all the pinned memory. Improving that is left to another patch. Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1181 Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4288 Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4566 Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Matthew Auld <matthew.auld@intel.com> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Link: https://lore.kernel.org/r/20250416150913.434369-6-matthew.auld@intel.com
39 lines
1.2 KiB
C
39 lines
1.2 KiB
C
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_PM_H_
#define _XE_PM_H_

#include <linux/pm_runtime.h>

/*
 * Default VRAM usage threshold, in MB. Exact semantics are defined by
 * xe_pm_set_vram_threshold()'s implementation in xe_pm.c.
 */
#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */

struct xe_device;

/* System suspend/resume (S-states) entry points. */
int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);

/* Driver-lifetime PM setup/teardown. */
int xe_pm_init_early(struct xe_device *xe);
int xe_pm_init(struct xe_device *xe);
void xe_pm_fini(struct xe_device *xe);

/* Runtime PM state query and transitions. */
bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);

/* Runtime PM reference-count helpers (get/put variants). */
void xe_pm_runtime_get(struct xe_device *xe);
int xe_pm_runtime_get_ioctl(struct xe_device *xe);
void xe_pm_runtime_put(struct xe_device *xe);
bool xe_pm_runtime_get_if_active(struct xe_device *xe);
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe);
void xe_pm_runtime_get_noresume(struct xe_device *xe);
bool xe_pm_runtime_resume_and_get(struct xe_device *xe);

/* Miscellaneous PM helpers; see xe_pm.c for behavior details. */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
int xe_pm_module_init(void);

#endif
|