linux/drivers/gpu/drm/xe/xe_svm.h
commit b587016878 ("drm/xe: Implement and use the drm_pagemap populate_mm op")
Author: Thomas Hellström

Add runtime PM since we might call populate_mm on a foreign device.

v3:
- Fix a kerneldoc failure (Matt Brost)
- Revert the bo type change from device to kernel (Matt Brost)
v4:
- Add an assert in xe_svm_alloc_vram (Matt Brost)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250619134035.170086-4-thomas.hellstrom@linux.intel.com
Committed: 2025-06-26 18:00:10 +02:00
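
A minimal sketch of the pattern the commit message describes: the VRAM population triggered through the populate_mm op is wrapped in a runtime-PM reference, because the call may originate from a foreign device. The callback signature and the helpers example_pagemap_to_tile() and example_populate_vram() are assumptions for illustration only; the real implementation lives in xe_svm.c, not in this header.

	static int example_populate_mm(struct drm_pagemap *dpagemap,
				       unsigned long start, unsigned long end,
				       struct mm_struct *mm)
	{
		/* Hypothetical helper: resolve the xe_tile backing this pagemap. */
		struct xe_tile *tile = example_pagemap_to_tile(dpagemap);
		struct xe_device *xe = tile_to_xe(tile);
		int err;

		/* Hold a runtime-PM reference; the caller may be a foreign device. */
		xe_pm_runtime_get(xe);
		/* Hypothetical helper standing in for the actual VRAM population. */
		err = example_populate_vram(tile, start, end, mm);
		xe_pm_runtime_put(xe);

		return err;
	}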

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Mask of tiles for which a binding is present for
	 * this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Mask of tiles for which the binding has been
	 * invalidated for this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
};
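
/*
 * Illustrative sketch (assumption, not taken from the driver): the tile
 * masks above are bitmasks indexed by tile id, so a caller holding the
 * GPU SVM notifier lock could check whether a given tile still has a
 * valid binding for the range roughly like this:
 *
 *	bool bound = (range->tile_present & ~range->tile_invalidated) &
 *		     BIT(tile->id);
 */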

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx);

struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);

int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx);

bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram);

void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);

bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred);

u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
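
/*
 * Illustrative sketch (not part of the driver): one way a GPU fault
 * handler could compose the declarations above. The function name and
 * the hard-coded preference for VRAM are assumptions for illustration;
 * the authoritative flow lives in xe_svm.c and xe_gt_pagefault.c, and
 * retry/invalidation handling is omitted.
 *
 *	static int example_handle_fault(struct xe_vm *vm, struct xe_vma *vma,
 *					struct xe_tile *tile, u64 fault_addr,
 *					struct drm_gpusvm_ctx *ctx)
 *	{
 *		struct xe_svm_range *range;
 *		int err;
 *
 *		range = xe_svm_range_find_or_insert(vm, fault_addr, vma, ctx);
 *		if (IS_ERR(range))
 *			return PTR_ERR(range);
 *
 *		if (xe_svm_range_needs_migrate_to_vram(range, vma, true)) {
 *			err = xe_svm_alloc_vram(tile, range, ctx);
 *			if (err)
 *				return err;
 *		}
 *
 *		return xe_svm_range_get_pages(vm, range, ctx);
 *	}
 */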

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);

	return range->base.flags.has_dma_mapping;
}

/**
 * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
 * @r: Pointer to the drm_gpusvm_range structure
 *
 * This function takes a pointer to a drm_gpusvm_range structure and
 * converts it to a pointer to the containing xe_svm_range structure.
 *
 * Return: Pointer to the xe_svm_range structure
 */
static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return container_of(r, struct xe_svm_range, base);
}

/**
 * xe_svm_range_start() - SVM range start address
 * @range: SVM range
 *
 * Return: start address of range.
 */
static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return drm_gpusvm_range_start(&range->base);
}

/**
 * xe_svm_range_end() - SVM range end address
 * @range: SVM range
 *
 * Return: end address of range.
 */
static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return drm_gpusvm_range_end(&range->base);
}

/**
 * xe_svm_range_size() - SVM range size
 * @range: SVM range
 *
 * Return: Size of range.
 */
static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return drm_gpusvm_range_size(&range->base);
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
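
/*
 * Illustrative sketch (assumption, not a quote from the driver): state
 * protected by the GPU SVM notifier lock, such as the result of
 * xe_svm_range_has_dma_mapping(), is expected to be queried with that
 * lock held via the wrappers above:
 *
 *	bool mapped;
 *
 *	xe_svm_notifier_lock(vm);
 *	mapped = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 */
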
void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline int
xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
		  const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

static inline
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	return ERR_PTR(-EINVAL);
}

static inline
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	return -EINVAL;
}

static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
{
	return NULL;
}

static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
{
	return 0;
}

static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
{
	return 0;
}

static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					u32 region)
{
	return false;
}

static inline
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
}

static inline
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	return false;
}

static inline
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
{
	return ULONG_MAX;
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_XE_GPUSVM) */

#endif /* _XE_SVM_H_ */