Release the mmap mappings of all vram objects that are associated
with a userfault, such that, while the PCIe function is in D3hot, any
access to those memory mappings will raise a userfault.
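A minimal sketch of this suspend-side flow, assuming a per-device
userfault list guarded by a mutex; the field and helper names below
are illustrative, not the exact driver symbols:

/* Sketch: on runtime suspend, zap the CPU mappings of every vram bo
 * that userspace currently has mapped, so the next CPU access faults.
 * xe->mem_access.vram_userfault and bo->vram_userfault_link are
 * assumed fields used for illustration only.
 */
static void release_vram_mmap_offsets(struct xe_device *xe)
{
	struct xe_bo *bo, *on;

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on, &xe->mem_access.vram_userfault.list,
				 vram_userfault_link) {
		/* Unmap every CPU PTE pointing at this object ... */
		drm_vma_node_unmap(&bo->ttm.base.vma_node,
				   bo->ttm.bdev->dev_mapping);
		/* ... and drop it from the userfault list. */
		list_del_init(&bo->vram_userfault_link);
	}
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
}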
Upon userfault, in order to access the memory mappings, if the
graphics function is in D3 a runtime resume of the dGPU is triggered
to transition it to D0.
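The fault-side counterpart could look roughly like this;
ttm_to_xe_bo(), xe_bo_device() and bo_is_vram() are stand-ins for
whatever helpers the driver provides, and the dma-resv locking around
the fault is elided:

/* Sketch: the CPU fault handler takes a runtime-PM reference before
 * touching VRAM (waking the device from D3 if needed) and records the
 * bo so a later runtime suspend can zap its mapping again.
 */
static vm_fault_t vram_fault_sketch(struct vm_fault *vmf)
{
	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
	struct xe_bo *bo = ttm_to_xe_bo(tbo);		/* assumed helper */
	struct xe_device *xe = xe_bo_device(bo);	/* assumed helper */
	vm_fault_t ret;

	/* If the graphics function is in D3 this resumes it to D0. */
	xe_pm_runtime_get(xe);

	/* dma-resv locking elided; see ttm_bo_vm_reserve() in TTM. */
	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);

	/* Only vram-backed bos need to fault again after suspend. */
	if (ret == VM_FAULT_NOPAGE && bo_is_vram(bo)) {
		mutex_lock(&xe->mem_access.vram_userfault.lock);
		if (list_empty(&bo->vram_userfault_link))
			list_add(&bo->vram_userfault_link,
				 &xe->mem_access.vram_userfault.list);
		mutex_unlock(&xe->mem_access.vram_userfault.lock);
	}

	xe_pm_runtime_put(xe);
	return ret;
}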
v2:
- Avoid iomem check before bo migration check as bo can migrate
to system memory (Matthew Auld)
v3:
- Delete bo userfault link during bo destroy
- Upon bo move (vram -> smem), do the bo userfault link deletion in
  xe_bo_move_notify instead of xe_bo_move (Thomas Hellström); see the
  sketch after this changelog
- Grab lock in rpm hook while deleting bo userfault link (Matthew Auld)
v4:
- Add kernel-doc and wrap the vram_userfault members in a
  structure (Matthew Auld)
- Get an rpm wakeref before taking the dma-resv lock (Matthew Auld)
- In the suspend path, hold the lock across the entire list
  operation, including the iteration (Matthew Auld)
v5:
- Use mutex lock instead of spin lock
v6:
- Fix review comments (Matthew Auld)
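To illustrate the v3 and v5 points above, the move-notify path could
drop the userfault link under the same mutex; a sketch reusing the
illustrative field names from the earlier snippets:

/* Sketch: once a bo migrates out of VRAM (vram -> smem), a CPU access
 * no longer touches the device, so the bo must leave the userfault
 * list. Doing this in the move-notify hook rather than in the move
 * itself keeps it alongside the other invalidation done when a bo
 * changes placement.
 */
static void bo_move_notify_sketch(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);	/* assumed helper */

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	if (!list_empty(&bo->vram_userfault_link))
		list_del_init(&bo->vram_userfault_link);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
}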
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Signed-off-by: Badal Nilawar <badal.nilawar@intel.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #For the xe_bo_move_notify() changes
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://lore.kernel.org/r/20240104130702.950078-1-badal.nilawar@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_PM_H_
#define _XE_PM_H_

#include <linux/pm_runtime.h>

/*
 * TODO: Threshold = 0 will block D3Cold.
 * Before we can move this to a higher value (like 300), we need to:
 * 1. rewrite the VRAM save / restore to avoid buffer object locks
 */
#define DEFAULT_VRAM_THRESHOLD 0 /* in MB */

struct xe_device;

int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);

void xe_pm_init_early(struct xe_device *xe);
void xe_pm_init(struct xe_device *xe);
void xe_pm_runtime_fini(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
int xe_pm_runtime_get(struct xe_device *xe);
int xe_pm_runtime_put(struct xe_device *xe);
int xe_pm_runtime_get_if_active(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);

#endif
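For context, callers bracket any hardware access with the runtime-PM
helpers declared above; a minimal usage sketch with error handling
simplified (the surrounding function is illustrative):

/* Sketch: hold a runtime-PM reference for the duration of any
 * register or VRAM access so the device cannot drop into D3 under us.
 */
static int do_hw_work_sketch(struct xe_device *xe)
{
	int ret;

	ret = xe_pm_runtime_get(xe);	/* resumes the device if suspended */
	if (ret < 0)
		return ret;

	/* ... touch registers / VRAM here ... */

	xe_pm_runtime_put(xe);		/* device may suspend again */
	return 0;
}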