mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-26 13:47:26 +00:00
drm/xe: Document Xe PM component
Replace outdated information with a proper PM documentation. Already establish the rules for the runtime PM get and put that Xe needs to follow. Also add missing function documentation to all the "exported" functions. v2: updated after Francois' feedback. s/grater/greater (Matt) v3: detach D3 from runtime_pm remove opportunistic S0iX (Anshuman) Cc: Matthew Auld <matthew.auld@intel.com> Cc: Anshuman Gupta <anshuman.gupta@intel.com> Acked-by: Francois Dugast <francois.dugast@intel.com> Reviewed-by: Francois Dugast <francois.dugast@intel.com> #v2 Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Reviewed-by: Anshuman Gupta <anshuman.gupta@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20240222163937.138342-1-rodrigo.vivi@intel.com
This commit is contained in:
parent
a9e483dda3
commit
30c399529f
@ -25,21 +25,47 @@
|
|||||||
/**
|
/**
|
||||||
* DOC: Xe Power Management
|
* DOC: Xe Power Management
|
||||||
*
|
*
|
||||||
* Xe PM shall be guided by the simplicity.
|
* Xe PM implements the main routines for both system level suspend states and
|
||||||
* Use the simplest hook options whenever possible.
|
* for the opportunistic runtime suspend states.
|
||||||
* Let's not reinvent the runtime_pm references and hooks.
|
|
||||||
* Shall have a clear separation of display and gt underneath this component.
|
|
||||||
*
|
*
|
||||||
* What's next:
|
* System Level Suspend (S-States) - In general this is OS initiated suspend
|
||||||
|
* driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
|
||||||
|
* S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`. They
|
||||||
|
* are the main point for the suspend to and resume from these states.
|
||||||
*
|
*
|
||||||
* For now s2idle and s3 are only working in integrated devices. The next step
|
* PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
|
||||||
* is to iterate through all VRAM's BO backing them up into the system memory
|
* state D3, controlled by the PCI subsystem and ACPI with the help from the
|
||||||
* before allowing the system suspend.
|
* runtime_pm infrastructure.
|
||||||
|
* PCI D3 is special and can mean D3hot, where Vcc power is on for keeping memory
|
||||||
|
* alive and quicker low latency resume or D3Cold where Vcc power is off for
|
||||||
|
* better power savings.
|
||||||
|
* The Vcc control of PCI hierarchy can only be controlled at the PCI root port
|
||||||
|
* level, while the device driver can be behind multiple bridges/switches and
|
||||||
|
* paired with other devices. For this reason, the PCI subsystem cannot perform
|
||||||
|
* the transition towards D3Cold. The lowest runtime PM possible from the PCI
|
||||||
|
* subsystem is D3hot. Then, if all these paired devices in the same root port
|
||||||
|
* are in D3hot, ACPI will assist here and run its own methods (_PR3 and _OFF)
|
||||||
|
* to perform the transition from D3hot to D3cold. Xe may disallow this
|
||||||
|
* transition by calling pci_d3cold_disable(root_pdev) before going to runtime
|
||||||
|
* suspend. It will be based on runtime conditions such as VRAM usage for a
|
||||||
|
* quick and low latency resume for instance.
|
||||||
*
|
*
|
||||||
* Also runtime_pm needs to be here from the beginning.
|
* Runtime PM - This infrastructure provided by the Linux kernel allows the
|
||||||
|
* device drivers to indicate when they can be runtime suspended, so the device
|
||||||
|
* could be put at D3 (if supported), or allow deeper package sleep states
|
||||||
|
* (PC-states), and/or other low level power states. Xe PM component provides
|
||||||
|
* `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that PCI
|
||||||
|
* subsystem will call before transition to/from runtime suspend.
|
||||||
*
|
*
|
||||||
* RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
|
* Also, Xe PM provides get and put functions that Xe driver will use to
|
||||||
* and no wait boost. Frequency optimizations should come on a next stage.
|
* indicate activity. In order to avoid locking complications with the memory
|
||||||
|
* management, whenever possible, these get and put functions needs to be called
|
||||||
|
* from the higher/outer levels.
|
||||||
|
* The main cases that need to be protected from the outer levels are: IOCTL,
|
||||||
|
* sysfs, debugfs, dma-buf sharing, GPU execution.
|
||||||
|
*
|
||||||
|
* This component is not responsible for GT idleness (RC6) nor GT frequency
|
||||||
|
* management (RPS).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -178,6 +204,12 @@ void xe_pm_init_early(struct xe_device *xe)
|
|||||||
drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
|
drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_init - Initialize Xe Power Management
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* This component is responsible for System and Device sleep states.
|
||||||
|
*/
|
||||||
void xe_pm_init(struct xe_device *xe)
|
void xe_pm_init(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
/* For now suspend/resume is only allowed with GuC */
|
/* For now suspend/resume is only allowed with GuC */
|
||||||
@ -196,6 +228,10 @@ void xe_pm_init(struct xe_device *xe)
|
|||||||
xe_pm_runtime_init(xe);
|
xe_pm_runtime_init(xe);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_runtime_fini - Finalize Runtime PM
|
||||||
|
* @xe: xe device instance
|
||||||
|
*/
|
||||||
void xe_pm_runtime_fini(struct xe_device *xe)
|
void xe_pm_runtime_fini(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
struct device *dev = xe->drm.dev;
|
struct device *dev = xe->drm.dev;
|
||||||
@ -225,6 +261,12 @@ struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
|
|||||||
return READ_ONCE(xe->pm_callback_task);
|
return READ_ONCE(xe->pm_callback_task);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* Returns 0 for success, negative error code otherwise.
|
||||||
|
*/
|
||||||
int xe_pm_runtime_suspend(struct xe_device *xe)
|
int xe_pm_runtime_suspend(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
struct xe_bo *bo, *on;
|
struct xe_bo *bo, *on;
|
||||||
@ -290,6 +332,12 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_runtime_resume - Waking up from D3hot/D3Cold
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* Returns 0 for success, negative error code otherwise.
|
||||||
|
*/
|
||||||
int xe_pm_runtime_resume(struct xe_device *xe)
|
int xe_pm_runtime_resume(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
struct xe_gt *gt;
|
struct xe_gt *gt;
|
||||||
@ -341,22 +389,47 @@ int xe_pm_runtime_resume(struct xe_device *xe)
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* Returns: Any number greater than or equal to 0 for success, negative error
|
||||||
|
* code otherwise.
|
||||||
|
*/
|
||||||
int xe_pm_runtime_get(struct xe_device *xe)
|
int xe_pm_runtime_get(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
return pm_runtime_get_sync(xe->drm.dev);
|
return pm_runtime_get_sync(xe->drm.dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* Returns: Any number greater than or equal to 0 for success, negative error
|
||||||
|
* code otherwise.
|
||||||
|
*/
|
||||||
int xe_pm_runtime_put(struct xe_device *xe)
|
int xe_pm_runtime_put(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
pm_runtime_mark_last_busy(xe->drm.dev);
|
pm_runtime_mark_last_busy(xe->drm.dev);
|
||||||
return pm_runtime_put(xe->drm.dev);
|
return pm_runtime_put(xe->drm.dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* Returns: Any number greater than or equal to 0 for success, negative error
|
||||||
|
* code otherwise.
|
||||||
|
*/
|
||||||
int xe_pm_runtime_get_if_active(struct xe_device *xe)
|
int xe_pm_runtime_get_if_active(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
return pm_runtime_get_if_active(xe->drm.dev, true);
|
return pm_runtime_get_if_active(xe->drm.dev, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
|
||||||
|
* @xe: xe device instance
|
||||||
|
*/
|
||||||
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
|
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
|
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
|
||||||
@ -371,6 +444,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
|
||||||
|
* @xe: xe device instance
|
||||||
|
* @threshold: VRAM size in bytes for the D3cold threshold
|
||||||
|
*
|
||||||
|
* Returns 0 for success, negative error code otherwise.
|
||||||
|
*/
|
||||||
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
|
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
|
||||||
{
|
{
|
||||||
struct ttm_resource_manager *man;
|
struct ttm_resource_manager *man;
|
||||||
@ -395,6 +475,13 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
|
||||||
|
* @xe: xe device instance
|
||||||
|
*
|
||||||
|
* To be called during runtime_pm idle callback.
|
||||||
|
* Check for all the D3Cold conditions ahead of runtime suspend.
|
||||||
|
*/
|
||||||
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
|
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
|
||||||
{
|
{
|
||||||
struct ttm_resource_manager *man;
|
struct ttm_resource_manager *man;
|
||||||
|
Loading…
Reference in New Issue
Block a user